Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | #ifndef _LINUX_PIPE_FS_I_H |
| 3 | #define _LINUX_PIPE_FS_I_H |
| 4 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 5 | #define PIPE_DEF_BUFFERS 16 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | |
Jens Axboe | 1432873 | 2006-05-03 10:35:26 +0200 | [diff] [blame] | 7 | #define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */ |
| 8 | #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ |
| 9 | #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ |
Linus Torvalds | 9883035 | 2012-04-29 13:12:42 -0700 | [diff] [blame] | 10 | #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ |
Christoph Hellwig | f6dd975 | 2020-05-20 17:58:12 +0200 | [diff] [blame] | 11 | #define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */ |
Linus Torvalds | 6c32978 | 2020-06-13 09:56:21 -0700 | [diff] [blame] | 12 | #define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */ |
David Howells | e7d553d | 2020-01-14 17:07:12 +0000 | [diff] [blame] | 13 | #ifdef CONFIG_WATCH_QUEUE |
Linus Torvalds | 6c32978 | 2020-06-13 09:56:21 -0700 | [diff] [blame] | 14 | #define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */ |
David Howells | e7d553d | 2020-01-14 17:07:12 +0000 | [diff] [blame] | 15 | #endif |
Jens Axboe | 3e7ee3e | 2006-04-02 23:11:04 +0200 | [diff] [blame] | 16 | |
/**
 * struct pipe_buffer - a linux kernel pipe buffer
 * @page: the page containing the data for the pipe buffer
 * @offset: offset of data inside the @page
 * @len: length of data inside the @page
 * @ops: operations associated with this buffer. See @pipe_buf_operations.
 * @flags: pipe buffer flags. See above (PIPE_BUF_FLAG_*).
 * @private: private data owned by the ops.
 **/
struct pipe_buffer {
	struct page *page;
	unsigned int offset, len;
	const struct pipe_buf_operations *ops;
	unsigned int flags;
	unsigned long private;
};
| 33 | |
/**
 * struct pipe_inode_info - a linux kernel pipe
 * @mutex: mutex protecting the whole thing
 * @rd_wait: reader wait point in case of empty pipe
 * @wr_wait: writer wait point in case of full pipe
 * @head: The point of buffer production
 * @tail: The point of buffer consumption
 * @max_usage: The maximum number of slots that may be used in the ring
 * @ring_size: total number of buffers (should be a power of 2)
 * @note_loss: The next read() should insert a data-lost message
 * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
 * @readers: number of current readers of this pipe
 * @writers: number of current writers of this pipe
 * @files: number of struct file referring this pipe (protected by ->i_lock)
 * @r_counter: reader counter
 * @w_counter: writer counter
 * @tmp_page: cached released page
 * @fasync_readers: reader side fasync
 * @fasync_writers: writer side fasync
 * @bufs: the circular array of pipe buffers
 * @user: the user who created this pipe
 * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
 **/
struct pipe_inode_info {
	struct mutex mutex;
	wait_queue_head_t rd_wait, wr_wait;
	/* head/tail are free-running counters; occupancy is head - tail
	 * (see pipe_occupancy() below), relying on unsigned wraparound. */
	unsigned int head;
	unsigned int tail;
	unsigned int max_usage;
	unsigned int ring_size;
#ifdef CONFIG_WATCH_QUEUE
	bool note_loss;
#endif
	unsigned int nr_accounted;
	unsigned int readers;
	unsigned int writers;
	unsigned int files;
	unsigned int r_counter;
	unsigned int w_counter;
	struct page *tmp_page;
	struct fasync_struct *fasync_readers;
	struct fasync_struct *fasync_writers;
	struct pipe_buffer *bufs;
	struct user_struct *user;
#ifdef CONFIG_WATCH_QUEUE
	struct watch_queue *watch_queue;
#endif
};
| 82 | |
/*
 * Note on the nesting of these functions:
 *
 * ->confirm()
 *	->try_steal()
 *
 * That is, ->try_steal() must be called on a confirmed buffer. See below for
 * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the
 * pipe and generic variants of these hooks.
 */
struct pipe_buf_operations {
	/*
	 * ->confirm() verifies that the data in the pipe buffer is there
	 * and that the contents are good. If the pages in the pipe belong
	 * to a file system, we may need to wait for IO completion in this
	 * hook. Returns 0 for good, or a negative error value in case of
	 * error. If not present all pages are considered good.
	 */
	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * When the contents of this pipe buffer has been completely
	 * consumed by a reader, ->release() is called.
	 */
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Attempt to take ownership of the pipe buffer and its contents.
	 * ->try_steal() returns %true for success, in which case the contents
	 * of the pipe (the buf->page) is locked and now completely owned by the
	 * caller. The page may then be transferred to a different mapping, the
	 * most often used case is insertion into different file address space
	 * cache. If not present the buffer cannot be stolen (see
	 * pipe_buf_try_steal() below).
	 */
	bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Get a reference to the pipe buffer. Returns %false on failure
	 * (see pipe_buf_get() below).
	 */
	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
| 124 | |
/**
 * pipe_empty - Return true if the pipe is empty
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 *
 * The ring holds no data exactly when the producer and consumer
 * counters coincide.
 */
static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
	return tail == head;
}
| 134 | |
/**
 * pipe_occupancy - Return number of slots used in the pipe
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 *
 * head and tail are free-running counters; unsigned subtraction yields
 * the correct occupancy even after either counter wraps around.
 */
static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
	unsigned int used = head - tail;

	return used;
}
| 144 | |
/**
 * pipe_full - Return true if the pipe is full
 * @head: The pipe ring head pointer
 * @tail: The pipe ring tail pointer
 * @limit: The maximum amount of slots available.
 *
 * The pipe is full once the occupancy (head - tail, computed with
 * wrap-safe unsigned arithmetic) reaches @limit.
 */
static inline bool pipe_full(unsigned int head, unsigned int tail,
			     unsigned int limit)
{
	return head - tail >= limit;
}
| 156 | |
| 157 | /** |
| 158 | * pipe_space_for_user - Return number of slots available to userspace |
| 159 | * @head: The pipe ring head pointer |
| 160 | * @tail: The pipe ring tail pointer |
| 161 | * @pipe: The pipe info structure |
| 162 | */ |
| 163 | static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail, |
| 164 | struct pipe_inode_info *pipe) |
| 165 | { |
| 166 | unsigned int p_occupancy, p_space; |
| 167 | |
| 168 | p_occupancy = pipe_occupancy(head, tail); |
David Howells | 6718b6f | 2019-10-16 16:47:32 +0100 | [diff] [blame] | 169 | if (p_occupancy >= pipe->max_usage) |
David Howells | 8cefc10 | 2019-11-15 13:30:32 +0000 | [diff] [blame] | 170 | return 0; |
| 171 | p_space = pipe->ring_size - p_occupancy; |
David Howells | 6718b6f | 2019-10-16 16:47:32 +0100 | [diff] [blame] | 172 | if (p_space > pipe->max_usage) |
| 173 | p_space = pipe->max_usage; |
David Howells | 8cefc10 | 2019-11-15 13:30:32 +0000 | [diff] [blame] | 174 | return p_space; |
| 175 | } |
| 176 | |
| 177 | /** |
Miklos Szeredi | 7bf2d1d | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 178 | * pipe_buf_get - get a reference to a pipe_buffer |
| 179 | * @pipe: the pipe that the buffer belongs to |
| 180 | * @buf: the buffer to get a reference to |
Matthew Wilcox | 15fab63 | 2019-04-05 14:02:10 -0700 | [diff] [blame] | 181 | * |
| 182 | * Return: %true if the reference was successfully obtained. |
Miklos Szeredi | 7bf2d1d | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 183 | */ |
Matthew Wilcox | 15fab63 | 2019-04-05 14:02:10 -0700 | [diff] [blame] | 184 | static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, |
Miklos Szeredi | 7bf2d1d | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 185 | struct pipe_buffer *buf) |
| 186 | { |
Matthew Wilcox | 15fab63 | 2019-04-05 14:02:10 -0700 | [diff] [blame] | 187 | return buf->ops->get(pipe, buf); |
Miklos Szeredi | 7bf2d1d | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 188 | } |
| 189 | |
/**
 * pipe_buf_release - put a reference to a pipe_buffer
 * @pipe: the pipe that the buffer belongs to
 * @buf: the buffer to put a reference to
 */
static inline void pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	const struct pipe_buf_operations *ops = buf->ops;

	/* Save ->ops and clear it before invoking ->release(), so @buf
	 * never holds a stale ops pointer once it has been released.
	 * NOTE(review): the clear-before-call ordering looks deliberate;
	 * do not reorder without confirming against fs/pipe.c users. */
	buf->ops = NULL;
	ops->release(pipe, buf);
}
| 203 | |
Miklos Szeredi | fba597d | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 204 | /** |
| 205 | * pipe_buf_confirm - verify contents of the pipe buffer |
| 206 | * @pipe: the pipe that the buffer belongs to |
| 207 | * @buf: the buffer to confirm |
| 208 | */ |
| 209 | static inline int pipe_buf_confirm(struct pipe_inode_info *pipe, |
| 210 | struct pipe_buffer *buf) |
| 211 | { |
Christoph Hellwig | b8d9e7f | 2020-05-20 17:58:15 +0200 | [diff] [blame] | 212 | if (!buf->ops->confirm) |
| 213 | return 0; |
Miklos Szeredi | fba597d | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 214 | return buf->ops->confirm(pipe, buf); |
| 215 | } |
| 216 | |
Miklos Szeredi | ca76f5b | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 217 | /** |
Christoph Hellwig | c928f64 | 2020-05-20 17:58:16 +0200 | [diff] [blame] | 218 | * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer |
Miklos Szeredi | ca76f5b | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 219 | * @pipe: the pipe that the buffer belongs to |
| 220 | * @buf: the buffer to attempt to steal |
| 221 | */ |
Christoph Hellwig | c928f64 | 2020-05-20 17:58:16 +0200 | [diff] [blame] | 222 | static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, |
| 223 | struct pipe_buffer *buf) |
Miklos Szeredi | ca76f5b | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 224 | { |
Christoph Hellwig | c928f64 | 2020-05-20 17:58:16 +0200 | [diff] [blame] | 225 | if (!buf->ops->try_steal) |
| 226 | return false; |
| 227 | return buf->ops->try_steal(pipe, buf); |
Miklos Szeredi | ca76f5b | 2016-09-27 10:45:12 +0200 | [diff] [blame] | 228 | } |
| 229 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 230 | /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual |
| 231 | memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ |
| 232 | #define PIPE_SIZE PAGE_SIZE |
| 233 | |
Miklos Szeredi | 61e0d47 | 2009-04-14 19:48:41 +0200 | [diff] [blame] | 234 | /* Pipe lock and unlock operations */ |
| 235 | void pipe_lock(struct pipe_inode_info *); |
| 236 | void pipe_unlock(struct pipe_inode_info *); |
| 237 | void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); |
| 238 | |
Eric Biggers | 4c2e4be | 2018-02-06 15:41:45 -0800 | [diff] [blame] | 239 | extern unsigned int pipe_max_size; |
Willy Tarreau | 759c011 | 2016-01-18 16:36:09 +0100 | [diff] [blame] | 240 | extern unsigned long pipe_user_pages_hard; |
| 241 | extern unsigned long pipe_user_pages_soft; |
Jens Axboe | ff9da69 | 2010-06-03 14:54:39 +0200 | [diff] [blame] | 242 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 243 | /* Drop the inode semaphore and wait for a pipe event, atomically */ |
Ingo Molnar | 3a326a2 | 2006-04-10 15:18:35 +0200 | [diff] [blame] | 244 | void pipe_wait(struct pipe_inode_info *pipe); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 245 | |
Al Viro | 7bee130 | 2013-03-21 11:04:15 -0400 | [diff] [blame] | 246 | struct pipe_inode_info *alloc_pipe_info(void); |
Al Viro | 4b8a8f1 | 2013-03-21 11:06:46 -0400 | [diff] [blame] | 247 | void free_pipe_info(struct pipe_inode_info *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 248 | |
Jens Axboe | f84d751 | 2006-05-01 19:59:03 +0200 | [diff] [blame] | 249 | /* Generic pipe buffer ops functions */ |
Matthew Wilcox | 15fab63 | 2019-04-05 14:02:10 -0700 | [diff] [blame] | 250 | bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); |
Christoph Hellwig | c928f64 | 2020-05-20 17:58:16 +0200 | [diff] [blame] | 251 | bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *); |
Miklos Szeredi | 6818173 | 2009-05-07 15:37:36 +0200 | [diff] [blame] | 252 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); |
Jens Axboe | f84d751 | 2006-05-01 19:59:03 +0200 | [diff] [blame] | 253 | |
Miklos Szeredi | 28a625c | 2014-01-22 19:36:57 +0100 | [diff] [blame] | 254 | extern const struct pipe_buf_operations nosteal_pipe_buf_ops; |
| 255 | |
David Howells | c73be61 | 2020-01-14 17:07:11 +0000 | [diff] [blame] | 256 | #ifdef CONFIG_WATCH_QUEUE |
| 257 | unsigned long account_pipe_buffers(struct user_struct *user, |
| 258 | unsigned long old, unsigned long new); |
| 259 | bool too_many_pipe_buffers_soft(unsigned long user_bufs); |
| 260 | bool too_many_pipe_buffers_hard(unsigned long user_bufs); |
| 261 | bool pipe_is_unprivileged_user(void); |
| 262 | #endif |
| 263 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 264 | /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ |
David Howells | c73be61 | 2020-01-14 17:07:11 +0000 | [diff] [blame] | 265 | #ifdef CONFIG_WATCH_QUEUE |
| 266 | int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); |
| 267 | #endif |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 268 | long pipe_fcntl(struct file *, unsigned int, unsigned long arg); |
David Howells | c73be61 | 2020-01-14 17:07:11 +0000 | [diff] [blame] | 269 | struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); |
Linus Torvalds | c66fb34 | 2010-11-28 14:09:57 -0800 | [diff] [blame] | 270 | |
Al Viro | e4fad8e | 2012-07-21 15:33:25 +0400 | [diff] [blame] | 271 | int create_pipe_files(struct file **, int); |
Eric Biggers | 96e99be40 | 2018-02-06 15:42:00 -0800 | [diff] [blame] | 272 | unsigned int round_pipe_size(unsigned long size); |
Al Viro | e4fad8e | 2012-07-21 15:33:25 +0400 | [diff] [blame] | 273 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 274 | #endif |