#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include <linux/swab.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};

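/*
 * Illustrative sketch (not part of the library): the k-prefixed fields
 * above point into the SQ/CQ ring memory mmap'ed from the kernel, so
 * the number of pending completions can be derived from the CQ ring
 * head/tail pair (memory barriers omitted here for brevity):
 *
 *	struct io_uring ring;
 *
 *	io_uring_queue_init(32, &ring, 0);
 *	unsigned ready = *ring.cq.ktail - *ring.cq.khead;
 *	io_uring_queue_exit(&ring);
 */
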
/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);

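/*
 * Sketch of driving the raw system calls directly; normally the library
 * interface below is preferred. Error handling is elided:
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	int fd = io_uring_setup(8, &p);
 *	// ... mmap the rings as io_uring_queue_mmap() does, fill one sqe ...
 *	io_uring_enter(fd, 1, 1, IORING_ENTER_GETEVENTS, NULL);
 */
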
/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);

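/*
 * Typical life cycle with the library interface (sketch, error checks
 * elided); io_uring_cqe_seen() is defined below:
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(32, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	// ... prep the sqe with one of the helpers below ...
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	// ... inspect cqe->res ...
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */
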
/*
 * Must be called once the cqe returned by io_uring_{peek,wait}_cqe()
 * has been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head; the kernel
		 * has the matching read barrier.
		 */
		write_barrier();
	}
}

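/*
 * Sketch of draining completions without blocking; handle_cqe() is a
 * hypothetical application callback:
 *
 *	while (io_uring_peek_cqe(&ring, &cqe) == 0 && cqe) {
 *		handle_cqe(cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */
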
/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}

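/*
 * These two helpers tie a submission to its completion (sketch; "req"
 * is a hypothetical per-request structure owned by the caller, and the
 * prep helper used is defined further below):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, req->iovs, 1, 0);
 *	io_uring_sqe_set_data(sqe, req);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	req = io_uring_cqe_get_data(cqe);	// same pointer back
 */
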
static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

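/*
 * Example (sketch): prep a single-vector read of len bytes from offset 0;
 * buf, len and fd are assumed to be set up by the caller:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 */
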
static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}

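/*
 * The *_fixed variants assume the buffer was registered up front via
 * io_uring_register() with IORING_REGISTER_BUFFERS; selecting the
 * buffer index through the sqe is left out of this sketch:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, 0);
 */
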
static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	sqe->poll_events = poll_mask;
}

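/*
 * Example (sketch): one-shot poll that completes once sockfd becomes
 * readable; POLLIN comes from <poll.h>:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 */
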
static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}

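/*
 * Example (sketch): queue a data-only sync of fd, comparable in effect
 * to fdatasync():
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 */
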
static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}

#ifdef __cplusplus
}
#endif

#endif