/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set to
 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */

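/*
 * Simplified sketch of the MQ_BARRIER pairing described above; see wq_sleep()
 * and __pipelined_op() further down in this file for the real code:
 *
 *	sleeper					waker (holds info->lock)
 *	WRITE_ONCE(wait.state, STATE_NONE);
 *	spin_unlock(&info->lock);
 *	schedule_hrtimeout...();		task = get_task_struct(this->task);
 *						smp_store_release(&this->state,
 *								  STATE_READY);
 *						wake_q_add_safe(wake_q, task);
 *	if (READ_ONCE(wait.state) == STATE_READY) {
 *		smp_acquire__after_ctrl_dep();
 *		return 0;
 *	}
 *
 * The waker calls wake_up_q() only after dropping info->lock.
 */
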
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct ucounts *ucounts;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->ucounts = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
					info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		info->ucounts = get_ucounts(current_ucounts());
		if (info->ucounts) {
			long msgqueue;

			spin_lock(&mq_lock);
			msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
			if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
				dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
				spin_unlock(&mq_lock);
				put_ucounts(info->ucounts);
				info->ucounts = NULL;
				/* mqueue_evict_inode() releases info->messages */
				ret = -EMFILE;
				goto out_inode;
			}
			spin_unlock(&mq_lock);
		}
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	if (info->ucounts) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		put_ucounts(info->ucounts);
		info->ucounts = NULL;
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct user_namespace *mnt_userns, struct inode *dir,
			 struct dentry *dentry, umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is the routine for system reads from the queue file.
 * To avoid messing with some sort of mq_receive here, we allow
 * reading only the queue size & notification info (the only values
 * that are interesting from the user's point of view and aren't
 * accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when there is a registered process, there
	 * is no process waiting synchronously for a message AND the state
	 * of the queue changed from empty to not empty. Here we are sure
	 * that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
			info->attr.mq_curmsgs == 1) {
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL: {
			struct kernel_siginfo sig_i;
			struct task_struct *task;

			/* do_mq_notify() accepts sigev_signo == 0, why?? */
			if (!info->notify.sigev_signo)
				break;

			clear_siginfo(&sig_i);
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			rcu_read_lock();
			/* map current pid/uid into info->owner's namespaces */
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			/*
			 * We can't use kill_pid_info(), this signal should
			 * bypass check_kill_permission(). It is from kernel
			 * but si_fromuser() can't know this.
			 * We do check the self_exec_id, to avoid sending
			 * signals to programs that don't expect them.
			 */
			task = pid_task(info->notify_owner, PIDTYPE_TGID);
			if (task && task->self_exec_id ==
						info->notify_self_exec_id) {
				do_send_sig_info(info->notify.sigev_signo,
						&sig_i, task, PIDTYPE_TGID);
			}
			rcu_read_unlock();
			break;
		}
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
			   struct timespec64 *ts)
{
	if (get_timespec64(ts, u_abs_timeout))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(&init_user_ns, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(&init_user_ns, d_inode(dentry->d_parent),
				 dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

1031/* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
1032 * gets its message and put to the queue (we have one free place for sure). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec64 *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}

Al Viro0d060602017-06-27 21:32:36 -04001169static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
1170 size_t msg_len, unsigned int __user *u_msg_prio,
Deepa Dinamanib9047722017-08-02 19:51:11 -07001171 struct timespec64 *ts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 ssize_t ret;
1174 struct msg_msg *msg_ptr;
Al Viro2903ff02012-08-28 12:52:22 -04001175 struct fd f;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 struct inode *inode;
1177 struct mqueue_inode_info *info;
1178 struct ext_wait_queue wait;
Carsten Emde9ca7d8e2010-04-02 22:40:20 +02001179 ktime_t expires, *timeout = NULL;
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001180 struct posix_msg_tree_node *new_leaf = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
Al Viro0d060602017-06-27 21:32:36 -04001182 if (ts) {
Deepa Dinamanib9047722017-08-02 19:51:11 -07001183 expires = timespec64_to_ktime(*ts);
Carsten Emde9ca7d8e2010-04-02 22:40:20 +02001184 timeout = &expires;
Al Viroc32c8af2008-12-14 03:46:48 -05001185 }
George C. Wilson20ca73b2006-05-24 16:09:55 -05001186
Al Viro0d060602017-06-27 21:32:36 -04001187 audit_mq_sendrecv(mqdes, msg_len, 0, ts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188
Al Viro2903ff02012-08-28 12:52:22 -04001189 f = fdget(mqdes);
1190 if (unlikely(!f.file)) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001191 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 goto out;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001193 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
Al Viro496ad9a2013-01-23 17:07:38 -05001195 inode = file_inode(f.file);
Al Viro2903ff02012-08-28 12:52:22 -04001196 if (unlikely(f.file->f_op != &mqueue_file_operations)) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001197 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 goto out_fput;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 info = MQUEUE_I(inode);
Al Viro9f45f5b2014-10-31 17:44:57 -04001201 audit_file(f.file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
Al Viro2903ff02012-08-28 12:52:22 -04001203 if (unlikely(!(f.file->f_mode & FMODE_READ))) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001204 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 goto out_fput;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001206 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207
1208 /* checks if buffer is big enough */
1209 if (unlikely(msg_len < info->attr.mq_msgsize)) {
1210 ret = -EMSGSIZE;
1211 goto out_fput;
1212 }
1213
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001214 /*
1215 * msg_insert really wants us to have a valid, spare node struct so
1216 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1217 * fall back to that if necessary.
1218 */
1219 if (!info->node_cache)
1220 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1221
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 spin_lock(&info->lock);
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001223
1224 if (!info->node_cache && new_leaf) {
1225 /* Save our speculative allocation into the cache */
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001226 INIT_LIST_HEAD(&new_leaf->msg_list);
1227 info->node_cache = new_leaf;
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001228 } else {
1229 kfree(new_leaf);
1230 }
1231
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 if (info->attr.mq_curmsgs == 0) {
Al Viro2903ff02012-08-28 12:52:22 -04001233 if (f.file->f_flags & O_NONBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 spin_unlock(&info->lock);
1235 ret = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 } else {
1237 wait.task = current;
Manfred Spraulc5b2cbd2020-02-03 17:34:36 -08001238
1239 /* memory barrier not required, we hold info->lock */
1240 WRITE_ONCE(wait.state, STATE_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 ret = wq_sleep(info, RECV, timeout, &wait);
1242 msg_ptr = wait.msg;
1243 }
1244 } else {
Waiman Long194a6b52016-11-17 11:46:38 -05001245 DEFINE_WAKE_Q(wake_q);
Davidlohr Buesofa6004a2015-05-04 07:02:46 -07001246
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 msg_ptr = msg_get(info);
1248
1249 inode->i_atime = inode->i_mtime = inode->i_ctime =
Deepa Dinamani078cd822016-09-14 07:48:04 -07001250 current_time(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251
 1252		/* There is now free space in the queue. */
Davidlohr Buesofa6004a2015-05-04 07:02:46 -07001253 pipelined_receive(&wake_q, info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 spin_unlock(&info->lock);
Davidlohr Buesofa6004a2015-05-04 07:02:46 -07001255 wake_up_q(&wake_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 ret = 0;
1257 }
1258 if (ret == 0) {
1259 ret = msg_ptr->m_ts;
1260
1261 if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
1262 store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
1263 ret = -EFAULT;
1264 }
1265 free_msg(msg_ptr);
1266 }
1267out_fput:
Al Viro2903ff02012-08-28 12:52:22 -04001268 fdput(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269out:
1270 return ret;
1271}
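/*
 * A minimal userspace sketch (not part of this file) of the receive side.
 * The EMSGSIZE check above requires the buffer to be at least mq_msgsize
 * bytes, so callers typically query mq_getattr() first.  The queue name
 * "/example_q" is hypothetical.
 */
#include <fcntl.h>
#include <mqueue.h>
#include <stdlib.h>
#include <time.h>

ssize_t example_receive(void)
{
	mqd_t q = mq_open("/example_q", O_RDONLY);
	struct mq_attr attr;
	struct timespec deadline;
	unsigned int prio;
	char *buf;
	ssize_t n;

	if (q == (mqd_t)-1)
		return -1;
	if (mq_getattr(q, &attr) == -1) {
		mq_close(q);
		return -1;
	}

	buf = malloc(attr.mq_msgsize);	/* must hold the largest message */
	if (!buf) {
		mq_close(q);
		return -1;
	}

	/* Wait at most ~5 seconds (absolute CLOCK_REALTIME deadline). */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	n = mq_timedreceive(q, buf, attr.mq_msgsize, &prio, &deadline);
	free(buf);
	mq_close(q);
	return n;
}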
1272
Al Viro0d060602017-06-27 21:32:36 -04001273SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1274 size_t, msg_len, unsigned int, msg_prio,
Arnd Bergmann21fc5382018-04-13 13:58:00 +02001275 const struct __kernel_timespec __user *, u_abs_timeout)
Al Viro0d060602017-06-27 21:32:36 -04001276{
Deepa Dinamanib9047722017-08-02 19:51:11 -07001277 struct timespec64 ts, *p = NULL;
Al Viro0d060602017-06-27 21:32:36 -04001278 if (u_abs_timeout) {
1279 int res = prepare_timeout(u_abs_timeout, &ts);
1280 if (res)
1281 return res;
1282 p = &ts;
1283 }
1284 return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1285}
1286
1287SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1288 size_t, msg_len, unsigned int __user *, u_msg_prio,
Arnd Bergmann21fc5382018-04-13 13:58:00 +02001289 const struct __kernel_timespec __user *, u_abs_timeout)
Al Viro0d060602017-06-27 21:32:36 -04001290{
Deepa Dinamanib9047722017-08-02 19:51:11 -07001291 struct timespec64 ts, *p = NULL;
Al Viro0d060602017-06-27 21:32:36 -04001292 if (u_abs_timeout) {
1293 int res = prepare_timeout(u_abs_timeout, &ts);
1294 if (res)
1295 return res;
1296 p = &ts;
1297 }
1298 return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1299}
1300
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301/*
 1302 * Note: if the caller asks us to deregister (by passing a NULL pointer)
 1303 * but is not the current owner of the notification, the request is
 1304 * silently ignored.  POSIX does not explicitly define this case.
1305 */
Al Viro0d060602017-06-27 21:32:36 -04001306static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307{
Al Viro2903ff02012-08-28 12:52:22 -04001308 int ret;
1309 struct fd f;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 struct sock *sock;
1311 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 struct mqueue_inode_info *info;
1313 struct sk_buff *nc;
1314
Al Viro0d060602017-06-27 21:32:36 -04001315 audit_mq_notify(mqdes, notification);
George C. Wilson20ca73b2006-05-24 16:09:55 -05001316
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 nc = NULL;
1318 sock = NULL;
Al Viro0d060602017-06-27 21:32:36 -04001319 if (notification != NULL) {
1320 if (unlikely(notification->sigev_notify != SIGEV_NONE &&
1321 notification->sigev_notify != SIGEV_SIGNAL &&
1322 notification->sigev_notify != SIGEV_THREAD))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 return -EINVAL;
Al Viro0d060602017-06-27 21:32:36 -04001324 if (notification->sigev_notify == SIGEV_SIGNAL &&
1325 !valid_signal(notification->sigev_signo)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 return -EINVAL;
1327 }
Al Viro0d060602017-06-27 21:32:36 -04001328 if (notification->sigev_notify == SIGEV_THREAD) {
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001329 long timeo;
1330
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 /* create the notify skb */
1332 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
Markus Elfringc2317402019-09-25 16:48:17 -07001333 if (!nc)
1334 return -ENOMEM;
1335
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 if (copy_from_user(nc->data,
Al Viro0d060602017-06-27 21:32:36 -04001337 notification->sigev_value.sival_ptr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 NOTIFY_COOKIE_LEN)) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001339 ret = -EFAULT;
Markus Elfringc2317402019-09-25 16:48:17 -07001340 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 }
1342
1343 /* TODO: add a header? */
1344 skb_put(nc, NOTIFY_COOKIE_LEN);
1345 /* and attach it to the socket */
1346retry:
Al Viro0d060602017-06-27 21:32:36 -04001347 f = fdget(notification->sigev_signo);
Al Viro2903ff02012-08-28 12:52:22 -04001348 if (!f.file) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001349 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 goto out;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001351 }
Al Viro2903ff02012-08-28 12:52:22 -04001352 sock = netlink_getsockbyfilp(f.file);
1353 fdput(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 if (IS_ERR(sock)) {
1355 ret = PTR_ERR(sock);
Markus Elfringc2317402019-09-25 16:48:17 -07001356 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 }
1358
Patrick McHardyc3d8d1e2007-11-07 02:42:09 -08001359 timeo = MAX_SCHEDULE_TIMEOUT;
Denis V. Lunev9457afe2008-06-05 11:23:39 -07001360 ret = netlink_attachskb(sock, nc, &timeo, NULL);
Cong Wangf991af32017-07-09 13:19:55 -07001361 if (ret == 1) {
1362 sock = NULL;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001363 goto retry;
Cong Wangf991af32017-07-09 13:19:55 -07001364 }
Markus Elfringc2317402019-09-25 16:48:17 -07001365 if (ret)
1366 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 }
1368 }
1369
Al Viro2903ff02012-08-28 12:52:22 -04001370 f = fdget(mqdes);
1371 if (!f.file) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001372 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 goto out;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001374 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
Al Viro496ad9a2013-01-23 17:07:38 -05001376 inode = file_inode(f.file);
Al Viro2903ff02012-08-28 12:52:22 -04001377 if (unlikely(f.file->f_op != &mqueue_file_operations)) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001378 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 goto out_fput;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 info = MQUEUE_I(inode);
1382
1383 ret = 0;
1384 spin_lock(&info->lock);
Al Viro0d060602017-06-27 21:32:36 -04001385 if (notification == NULL) {
Cedric Le Goatera03fcb72006-10-02 02:17:26 -07001386 if (info->notify_owner == task_tgid(current)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 remove_notification(info);
Deepa Dinamani078cd822016-09-14 07:48:04 -07001388 inode->i_atime = inode->i_ctime = current_time(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 }
Cedric Le Goatera03fcb72006-10-02 02:17:26 -07001390 } else if (info->notify_owner != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 ret = -EBUSY;
1392 } else {
Al Viro0d060602017-06-27 21:32:36 -04001393 switch (notification->sigev_notify) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 case SIGEV_NONE:
1395 info->notify.sigev_notify = SIGEV_NONE;
1396 break;
1397 case SIGEV_THREAD:
1398 info->notify_sock = sock;
1399 info->notify_cookie = nc;
1400 sock = NULL;
1401 nc = NULL;
1402 info->notify.sigev_notify = SIGEV_THREAD;
1403 break;
1404 case SIGEV_SIGNAL:
Al Viro0d060602017-06-27 21:32:36 -04001405 info->notify.sigev_signo = notification->sigev_signo;
1406 info->notify.sigev_value = notification->sigev_value;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 info->notify.sigev_notify = SIGEV_SIGNAL;
Oleg Nesterovb5f20062020-05-07 18:35:39 -07001408 info->notify_self_exec_id = current->self_exec_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 break;
1410 }
Cedric Le Goatera03fcb72006-10-02 02:17:26 -07001411
1412 info->notify_owner = get_pid(task_tgid(current));
Eric W. Biederman6f9ac6d2011-11-16 22:57:55 -08001413 info->notify_user_ns = get_user_ns(current_user_ns());
Deepa Dinamani078cd822016-09-14 07:48:04 -07001414 inode->i_atime = inode->i_ctime = current_time(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 }
1416 spin_unlock(&info->lock);
1417out_fput:
Al Viro2903ff02012-08-28 12:52:22 -04001418 fdput(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419out:
Davidlohr Bueso3ab08fe2014-01-27 17:07:06 -08001420 if (sock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 netlink_detachskb(sock, nc);
Markus Elfring97b0b1a2019-09-25 16:48:14 -07001422 else
Markus Elfringc2317402019-09-25 16:48:17 -07001423free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 dev_kfree_skb(nc);
Davidlohr Bueso3ab08fe2014-01-27 17:07:06 -08001425
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 return ret;
1427}
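/*
 * A minimal userspace sketch (not part of this file) of SIGEV_SIGNAL
 * registration via mq_notify(3), which lands in do_mq_notify() above.
 * Registration is exclusive (EBUSY if another process already owns it)
 * and one-shot, so it must be re-armed after each notification.  The
 * queue name and signal choice are arbitrary examples.
 */
#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>

static void on_message(int sig)
{
	(void)sig;	/* async-signal-safe work only, e.g. set a flag */
}

int example_notify(void)
{
	mqd_t q = mq_open("/example_q", O_RDONLY);
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};

	if (q == (mqd_t)-1)
		return -1;

	signal(SIGUSR1, on_message);
	if (mq_notify(q, &sev) == -1)
		perror("mq_notify");	/* e.g. EBUSY: someone else registered */

	return 0;
}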
1428
Al Viro0d060602017-06-27 21:32:36 -04001429SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1430 const struct sigevent __user *, u_notification)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431{
Al Viro0d060602017-06-27 21:32:36 -04001432 struct sigevent n, *p = NULL;
1433 if (u_notification) {
1434 if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
1435 return -EFAULT;
1436 p = &n;
1437 }
1438 return do_mq_notify(mqdes, p);
1439}
1440
1441static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
1442{
Al Viro2903ff02012-08-28 12:52:22 -04001443 struct fd f;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 struct inode *inode;
1445 struct mqueue_inode_info *info;
1446
Al Viro0d060602017-06-27 21:32:36 -04001447 if (new && (new->mq_flags & (~O_NONBLOCK)))
1448 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Al Viro2903ff02012-08-28 12:52:22 -04001450 f = fdget(mqdes);
Al Viro0d060602017-06-27 21:32:36 -04001451 if (!f.file)
1452 return -EBADF;
1453
1454 if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1455 fdput(f);
1456 return -EBADF;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001457 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Al Viro496ad9a2013-01-23 17:07:38 -05001459 inode = file_inode(f.file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 info = MQUEUE_I(inode);
1461
1462 spin_lock(&info->lock);
1463
Al Viro0d060602017-06-27 21:32:36 -04001464 if (old) {
1465 *old = info->attr;
1466 old->mq_flags = f.file->f_flags & O_NONBLOCK;
1467 }
1468 if (new) {
1469 audit_mq_getsetattr(mqdes, new);
Al Viro2903ff02012-08-28 12:52:22 -04001470 spin_lock(&f.file->f_lock);
Al Viro0d060602017-06-27 21:32:36 -04001471 if (new->mq_flags & O_NONBLOCK)
Al Viro2903ff02012-08-28 12:52:22 -04001472 f.file->f_flags |= O_NONBLOCK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 else
Al Viro2903ff02012-08-28 12:52:22 -04001474 f.file->f_flags &= ~O_NONBLOCK;
1475 spin_unlock(&f.file->f_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
Deepa Dinamani078cd822016-09-14 07:48:04 -07001477 inode->i_atime = inode->i_ctime = current_time(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 }
1479
1480 spin_unlock(&info->lock);
Al Viro2903ff02012-08-28 12:52:22 -04001481 fdput(f);
Al Viro0d060602017-06-27 21:32:36 -04001482 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483}
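/*
 * A minimal userspace sketch (not part of this file): only O_NONBLOCK in
 * mq_flags may change after creation, as the EINVAL check above enforces;
 * mq_maxmsg and mq_msgsize are fixed at mq_open() time.
 */
#include <fcntl.h>
#include <mqueue.h>

int example_set_nonblock(mqd_t q, int nonblock)
{
	struct mq_attr newattr = { .mq_flags = nonblock ? O_NONBLOCK : 0 };
	struct mq_attr oldattr;

	/* oldattr returns the previous settings, like u_omqstat above. */
	return mq_setattr(q, &newattr, &oldattr);
}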
1484
Al Viro0d060602017-06-27 21:32:36 -04001485SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1486 const struct mq_attr __user *, u_mqstat,
1487 struct mq_attr __user *, u_omqstat)
1488{
1489 int ret;
1490 struct mq_attr mqstat, omqstat;
1491 struct mq_attr *new = NULL, *old = NULL;
1492
1493 if (u_mqstat) {
1494 new = &mqstat;
1495 if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1496 return -EFAULT;
1497 }
1498 if (u_omqstat)
1499 old = &omqstat;
1500
1501 ret = do_mq_getsetattr(mqdes, new, old);
1502 if (ret || !old)
1503 return ret;
1504
1505 if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1506 return -EFAULT;
1507 return 0;
1508}
1509
1510#ifdef CONFIG_COMPAT
1511
1512struct compat_mq_attr {
1513 compat_long_t mq_flags; /* message queue flags */
1514 compat_long_t mq_maxmsg; /* maximum number of messages */
1515 compat_long_t mq_msgsize; /* maximum message size */
1516 compat_long_t mq_curmsgs; /* number of messages currently queued */
1517 compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1518};
1519
1520static inline int get_compat_mq_attr(struct mq_attr *attr,
1521 const struct compat_mq_attr __user *uattr)
1522{
1523 struct compat_mq_attr v;
1524
1525 if (copy_from_user(&v, uattr, sizeof(*uattr)))
1526 return -EFAULT;
1527
1528 memset(attr, 0, sizeof(*attr));
1529 attr->mq_flags = v.mq_flags;
1530 attr->mq_maxmsg = v.mq_maxmsg;
1531 attr->mq_msgsize = v.mq_msgsize;
1532 attr->mq_curmsgs = v.mq_curmsgs;
1533 return 0;
1534}
1535
1536static inline int put_compat_mq_attr(const struct mq_attr *attr,
1537 struct compat_mq_attr __user *uattr)
1538{
1539 struct compat_mq_attr v;
1540
1541 memset(&v, 0, sizeof(v));
1542 v.mq_flags = attr->mq_flags;
1543 v.mq_maxmsg = attr->mq_maxmsg;
1544 v.mq_msgsize = attr->mq_msgsize;
1545 v.mq_curmsgs = attr->mq_curmsgs;
1546 if (copy_to_user(uattr, &v, sizeof(*uattr)))
1547 return -EFAULT;
1548 return 0;
1549}
1550
1551COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1552 int, oflag, compat_mode_t, mode,
1553 struct compat_mq_attr __user *, u_attr)
1554{
1555 struct mq_attr attr, *p = NULL;
1556 if (u_attr && oflag & O_CREAT) {
1557 p = &attr;
1558 if (get_compat_mq_attr(&attr, u_attr))
1559 return -EFAULT;
1560 }
1561 return do_mq_open(u_name, oflag, mode, p);
1562}
1563
Arnd Bergmannb0d17572018-04-13 13:58:23 +02001564COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1565 const struct compat_sigevent __user *, u_notification)
1566{
1567 struct sigevent n, *p = NULL;
1568 if (u_notification) {
1569 if (get_compat_sigevent(&n, u_notification))
1570 return -EFAULT;
1571 if (n.sigev_notify == SIGEV_THREAD)
1572 n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1573 p = &n;
1574 }
1575 return do_mq_notify(mqdes, p);
1576}
1577
1578COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1579 const struct compat_mq_attr __user *, u_mqstat,
1580 struct compat_mq_attr __user *, u_omqstat)
1581{
1582 int ret;
1583 struct mq_attr mqstat, omqstat;
1584 struct mq_attr *new = NULL, *old = NULL;
1585
1586 if (u_mqstat) {
1587 new = &mqstat;
1588 if (get_compat_mq_attr(new, u_mqstat))
1589 return -EFAULT;
1590 }
1591 if (u_omqstat)
1592 old = &omqstat;
1593
1594 ret = do_mq_getsetattr(mqdes, new, old);
1595 if (ret || !old)
1596 return ret;
1597
1598 if (put_compat_mq_attr(old, u_omqstat))
1599 return -EFAULT;
1600 return 0;
1601}
1602#endif
1603
1604#ifdef CONFIG_COMPAT_32BIT_TIME
Arnd Bergmann9afc5ee2018-07-13 12:52:28 +02001605static int compat_prepare_timeout(const struct old_timespec32 __user *p,
Deepa Dinamanib9047722017-08-02 19:51:11 -07001606 struct timespec64 *ts)
Al Viro0d060602017-06-27 21:32:36 -04001607{
Arnd Bergmann9afc5ee2018-07-13 12:52:28 +02001608 if (get_old_timespec32(ts, p))
Al Viro0d060602017-06-27 21:32:36 -04001609 return -EFAULT;
Deepa Dinamanib9047722017-08-02 19:51:11 -07001610 if (!timespec64_valid(ts))
Al Viro0d060602017-06-27 21:32:36 -04001611 return -EINVAL;
1612 return 0;
1613}
1614
Arnd Bergmann8dabe722019-01-07 00:33:08 +01001615SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
1616 const char __user *, u_msg_ptr,
1617 unsigned int, msg_len, unsigned int, msg_prio,
1618 const struct old_timespec32 __user *, u_abs_timeout)
Al Viro0d060602017-06-27 21:32:36 -04001619{
Deepa Dinamanib9047722017-08-02 19:51:11 -07001620 struct timespec64 ts, *p = NULL;
Al Viro0d060602017-06-27 21:32:36 -04001621 if (u_abs_timeout) {
1622 int res = compat_prepare_timeout(u_abs_timeout, &ts);
1623 if (res)
1624 return res;
1625 p = &ts;
1626 }
1627 return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1628}
1629
Arnd Bergmann8dabe722019-01-07 00:33:08 +01001630SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
1631 char __user *, u_msg_ptr,
1632 unsigned int, msg_len, unsigned int __user *, u_msg_prio,
1633 const struct old_timespec32 __user *, u_abs_timeout)
Al Viro0d060602017-06-27 21:32:36 -04001634{
Deepa Dinamanib9047722017-08-02 19:51:11 -07001635 struct timespec64 ts, *p = NULL;
Al Viro0d060602017-06-27 21:32:36 -04001636 if (u_abs_timeout) {
1637 int res = compat_prepare_timeout(u_abs_timeout, &ts);
1638 if (res)
1639 return res;
1640 p = &ts;
1641 }
1642 return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1643}
Al Viro0d060602017-06-27 21:32:36 -04001644#endif
1645
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08001646static const struct inode_operations mqueue_dir_inode_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 .lookup = simple_lookup,
1648 .create = mqueue_create,
1649 .unlink = mqueue_unlink,
1650};
1651
Arjan van de Ven9a321442007-02-12 00:55:35 -08001652static const struct file_operations mqueue_file_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 .flush = mqueue_flush_file,
1654 .poll = mqueue_poll_file,
1655 .read = mqueue_read_file,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001656 .llseek = default_llseek,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657};
1658
Alexey Dobriyanb87221d2009-09-21 17:01:09 -07001659static const struct super_operations mqueue_super_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 .alloc_inode = mqueue_alloc_inode,
Al Viro015d7952019-04-15 22:30:30 -04001661 .free_inode = mqueue_free_inode,
Al Viro6d8af642010-06-05 16:29:45 -04001662 .evict_inode = mqueue_evict_inode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 .statfs = simple_statfs,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664};
1665
David Howells935c69122018-11-01 23:07:25 +00001666static const struct fs_context_operations mqueue_fs_context_ops = {
1667 .free = mqueue_fs_context_free,
1668 .get_tree = mqueue_get_tree,
1669};
1670
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671static struct file_system_type mqueue_fs_type = {
David Howells935c69122018-11-01 23:07:25 +00001672 .name = "mqueue",
1673 .init_fs_context = mqueue_init_fs_context,
1674 .kill_sb = kill_litter_super,
1675 .fs_flags = FS_USERNS_MOUNT,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676};
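/*
 * A sketch (not part of this file) of mounting the filesystem type
 * registered above, equivalent to "mount -t mqueue none /dev/mqueue".
 * Each queue then appears as a file whose read goes through
 * mqueue_read_file() and reports queue size and notification state.
 * Mounting normally requires privilege in the target mount namespace.
 */
#include <stdio.h>
#include <sys/mount.h>

int example_mount_mqueue(void)
{
	if (mount("none", "/dev/mqueue", "mqueue", 0, NULL) == -1) {
		perror("mount");
		return -1;
	}
	return 0;
}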
1677
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001678int mq_init_ns(struct ipc_namespace *ns)
1679{
David Howells935c69122018-11-01 23:07:25 +00001680 struct vfsmount *m;
1681
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001682 ns->mq_queues_count = 0;
1683 ns->mq_queues_max = DFLT_QUEUESMAX;
1684 ns->mq_msg_max = DFLT_MSGMAX;
1685 ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
KOSAKI Motohirocef01842012-05-31 16:26:33 -07001686 ns->mq_msg_default = DFLT_MSG;
1687 ns->mq_msgsize_default = DFLT_MSGSIZE;
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001688
David Howells935c69122018-11-01 23:07:25 +00001689 m = mq_create_mount(ns);
1690 if (IS_ERR(m))
1691 return PTR_ERR(m);
1692 ns->mq_mnt = m;
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001693 return 0;
1694}
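/*
 * A sketch (not part of this file): the per-namespace defaults initialized
 * here surface to userspace through the fs.mqueue sysctls registered in
 * init_mqueue_fs() below, e.g. /proc/sys/fs/mqueue/msg_max.
 */
#include <stdio.h>

long example_read_msg_max(void)
{
	FILE *f = fopen("/proc/sys/fs/mqueue/msg_max", "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}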
1695
1696void mq_clear_sbinfo(struct ipc_namespace *ns)
1697{
Eric W. Biedermancfb2f6f2018-03-24 11:28:14 -05001698 ns->mq_mnt->mnt_sb->s_fs_info = NULL;
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001699}
1700
1701void mq_put_mnt(struct ipc_namespace *ns)
1702{
Eric W. Biedermancfb2f6f2018-03-24 11:28:14 -05001703 kern_unmount(ns->mq_mnt);
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001704}
1705
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706static int __init init_mqueue_fs(void)
1707{
1708 int error;
1709
1710 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1711 sizeof(struct mqueue_inode_info), 0,
Vladimir Davydov5d097052016-01-14 15:18:21 -08001712 SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 if (mqueue_inode_cachep == NULL)
1714 return -ENOMEM;
1715
André Goddard Rosa2329e3922010-02-23 04:04:27 -03001716 /* ignore failures - they are not fatal */
Serge E. Hallynbdc8e5f2009-04-06 19:01:11 -07001717 mq_sysctl_table = mq_register_sysctl_table();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 error = register_filesystem(&mqueue_fs_type);
1720 if (error)
1721 goto out_sysctl;
1722
Serge E. Hallyn7eafd7c2009-04-06 19:01:10 -07001723 spin_lock_init(&mq_lock);
1724
Al Viro6f686572011-12-09 00:38:50 -05001725 error = mq_init_ns(&init_ipc_ns);
1726 if (error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 goto out_filesystem;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 return 0;
1730
1731out_filesystem:
1732 unregister_filesystem(&mqueue_fs_type);
1733out_sysctl:
1734 if (mq_sysctl_table)
1735 unregister_sysctl_table(mq_sysctl_table);
Alexey Dobriyan1a1d92c2006-09-27 01:49:40 -07001736 kmem_cache_destroy(mqueue_inode_cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 return error;
1738}
1739
Davidlohr Bueso6d08a252014-04-07 15:39:18 -07001740device_initcall(init_mqueue_fs);