#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};

/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);
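
/*
 * Raw usage, for illustration only (a hedged sketch, not a complete
 * flow): io_uring_setup() creates the rings, mmap() on the returned fd
 * maps them into the process, and io_uring_enter() submits and/or waits
 * for events:
 *
 *	struct io_uring_params p = { };
 *	int fd = io_uring_setup(8, &p);
 *	...map the SQ/CQ rings and sqe array via mmap(fd)...
 *	io_uring_enter(fd, 1, 1, IORING_ENTER_GETEVENTS, NULL);
 *
 * The io_uring_queue_init()/io_uring_queue_mmap() helpers below wrap
 * these steps.
 */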

/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
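
/*
 * Typical round trip, sketched with the helpers above (error handling
 * omitted; fd and iovecs are assumed to be set up by the caller):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, iovecs, 1, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	... cqe->res holds the result: bytes read, or -errno ...
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */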

/*
 * Must be called once the application is done with a cqe obtained via
 * io_uring_{peek,wait}_cqe(), so the kernel can reuse the entry.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head; the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}
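
/*
 * One way to drain pending completions without blocking (a sketch;
 * handle_cqe() is a hypothetical application callback):
 *
 *	while (io_uring_peek_cqe(&ring, &cqe) == 0 && cqe) {
 *		handle_cqe(cqe);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */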

/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}
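
/*
 * user_data passes through the kernel untouched, so per-request state
 * can be attached at submit time and recovered at completion time.
 * Sketch, with a hypothetical struct req holding the request's iovec:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &req->iov, 1, 0);
 *	io_uring_sqe_set_data(sqe, req);
 *	...submit, then on completion...
 *	req = io_uring_cqe_get_data(cqe);
 */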

static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}
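
/*
 * The *_fixed variants assume buffers registered up front with
 * io_uring_register(fd, IORING_REGISTER_BUFFERS, iovecs, nr); the caller
 * must also set sqe->buf_index to select the registered buffer. Sketch:
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, iovecs, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, iovecs[0].iov_base, 4096, 0);
 *	sqe->buf_index = 0;
 */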

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  short poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
	sqe->poll_events = poll_mask;
}

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}
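
/*
 * Poll removal matches the original request by its user_data value, so
 * the same pointer must be used for both. Sketch, with a hypothetical
 * tag pointer:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 *	io_uring_sqe_set_data(sqe, tag);
 *	...later, to cancel the poll...
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_remove(sqe, tag);
 */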

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}
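
/*
 * fsync_flags selects the sync flavor: 0 gives fsync() semantics,
 * IORING_FSYNC_DATASYNC gives fdatasync() semantics. Sketch:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 */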

static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}
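
/*
 * NOP requests complete immediately without doing I/O, which makes them
 * handy for testing that a ring is functional. Sketch:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_nop(sqe);
 *	io_uring_submit(&ring);
 */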

#ifdef __cplusplus
}
#endif

#endif