/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <linux/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

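/*
 * Search modes for msgrcv(): ANY takes the first message on the queue,
 * EQUAL/NOTEQUAL/LESSEQUAL compare a message's m_type against the caller's
 * msgtyp, and NUMBER (used for MSG_COPY) picks the Nth matching message.
 */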
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
#define SEARCH_NUMBER		5

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

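/*
 * Resolve a msg queue id to its msg_queue. Callers hold the RCU read lock;
 * the _check variant also verifies that the id (including its sequence
 * number) still matches, guarding against reuse of the slot.
 */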
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

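/* RCU callback: release the security blob, then the queue itself */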
static void msg_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	security_msg_queue_free(msq);
	ipc_rcu_free(head);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq, ipc_rcu_free);
		return retval;
	}

	/* ipc_addid() locks msq upon success. */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		ipc_rcu_putref(msq, msg_rcu_free);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}

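/*
 * Sleeping senders: ss_add() marks the current task TASK_INTERRUPTIBLE and
 * queues it on q_senders, ss_del() unlinks it again, and ss_wakeup() wakes
 * every waiter, invalidating the list entries when the queue is going away.
 */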
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	__set_current_state(TASK_INTERRUPTIBLE);
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

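/*
 * Wake every sleeping receiver and hand it the error in @res, e.g. -EIDRM
 * when the queue is removed or -EAGAIN after an IPC_SET changed permissions.
 */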
static void expunge_all(struct msg_queue *msq, int res)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		msr->r_msg = NULL; /* initialize expunge ordering */
		wake_up_process(msr->r_tsk);
		/*
		 * Ensure that the wakeup is visible before setting r_msg as
		 * the receiving end depends on it: either spinning on a nil,
		 * or dealing with -EAGAIN cases. See lockless receive part 1
		 * and 2 in do_msgrcv().
		 */
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	ipc_rcu_putref(msq, msg_rcu_free);
}

/*
 * Called with msg_ids.rwsem and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops msg_ops = {
		.getnew = newque,
		.associate = msg_security,
	};
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}

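/*
 * Copy a msqid64_ds out to user space, converting to the old msqid_ds
 * layout (with counters saturated at USHRT_MAX) for IPC_OLD callers.
 */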
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

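/*
 * The inverse direction: pull the IPC_SET arguments in from either the new
 * or the old user space layout.
 */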
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
				      &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&msg_ids(ns).rwsem);
	return err;
}

static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct msg_queue *msq;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;

		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rwsem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rwsem);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}

	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == MSG_STAT) {
			msq = msq_obtain_object(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = msq->q_perm.id;
		} else {
			msq = msq_obtain_object_check(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		rcu_read_unlock();

		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}

	default:
		return -EINVAL;
	}

	return err;
out_unlock:
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	int version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
		return msgctl_nolock(ns, msqid, cmd, version, buf);
	case IPC_SET:
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, buf, version);
	default:
		return -EINVAL;
	}
}

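/* Does @msg satisfy a receive request with the given type and search mode? */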
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

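/*
 * Try to hand the new message directly to a sleeping receiver that it
 * satisfies, bypassing the queue. Returns 1 if the message was consumed
 * this way, 0 if it still has to be enqueued. Called with the queue locked.
 */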
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				/* initialize pipelined send ordering */
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb(); /* see barrier comment below */
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				/*
				 * Ensure that the wakeup is visible before
				 * setting r_msg, as the receiving end depends
				 * on it. See lockless receive part 1 and 2 in
				 * do_msgrcv().
				 */
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}

	return 0;
}

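/*
 * do_msgsnd() backs msgsnd(): copy the message text in, then either pass it
 * straight to a waiting receiver via pipelined_send() or append it to the
 * queue, sleeping as a msg_sender (unless IPC_NOWAIT) while the queue is full.
 */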
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_unlock1;
	}

	ipc_lock_object(&msq->q_perm);

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock0;

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock0;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock0;
		}

		/* enqueue the sender and prepare to block */
		ss_add(msq, &s);

		if (!ipc_rcu_getref(msq)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(&msq->q_perm);

		ipc_rcu_putref(msq, ipc_rcu_free);
		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}

	}
	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 *  find message of correct type.
	 *  msgtyp = 0 => get first.
	 *  msgtyp > 0 => get first message of matching type.
	 *  msgtyp < 0 => get message with the lowest type that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

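/* Copy a received message (type word plus text) back out to user space. */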
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to store
 * bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif

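/*
 * Scan the queue for a message matching *msgtyp under @mode: the lowest
 * typed match for SEARCH_LESSEQUAL, the *msgtyp'th match for SEARCH_NUMBER,
 * otherwise the first match. Returns ERR_PTR(-EAGAIN) if nothing qualifies.
 */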
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg, *found = NULL;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
				found = msg;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return found ?: ERR_PTR(-EAGAIN);
}

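/*
 * do_msgrcv() backs msgrcv() and the MSG_COPY variant used by
 * checkpoint/restore: find a matching message or sleep as a msg_receiver
 * until pipelined_send()/expunge_all() hands one over, then copy the result
 * out through @msg_handler.
 */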
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	int mode;
	struct msg_queue *msq;
	struct ipc_namespace *ns;
	struct msg_msg *msg, *copy = NULL;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;

	if (msgflg & MSG_COPY) {
		if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
			return -EINVAL;
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		rcu_read_unlock();
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock1;

		ipc_lock_object(&msq->q_perm);

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			msg = ERR_PTR(-EIDRM);
			goto out_unlock0;
		}

		msg = find_msg(msq, &msgtyp, mode);
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock0;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock0;
			}

			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);

			goto out_unlock0;
		}

		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock0;
		}

		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		__set_current_state(TASK_INTERRUPTIBLE);

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and acquiring the q_perm.lock in ipc_lock_object().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock1;

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_object(&msq->q_perm);

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock0;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}

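/* Per-namespace setup: default limits, usage counters and the id allocator */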
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

#ifdef CONFIG_PROC_FS
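/* Emit one line of /proc/sysvipc/msg for the queue passed in via @it */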
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			from_kuid_munged(user_ns, msq->q_perm.uid),
			from_kgid_munged(user_ns, msq->q_perm.gid),
			from_kuid_munged(user_ns, msq->q_perm.cuid),
			from_kgid_munged(user_ns, msq->q_perm.cgid),
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}