// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file		*shm_file;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time64_t		shm_atim;
	time64_t		shm_dtim;
	time64_t		shm_ctim;
	struct pid		*shm_cprid;
	struct pid		*shm_lprid;
	struct user_struct	*mlock_user;

	/* The task that created the shm object; NULL if the task is dead. */
	struct task_struct	*shm_creator;
	struct list_head	shm_clist;	/* list by creator */
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000	/* segment will not be swapped */

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

int shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	return ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
	const int err = shm_init_ns(&init_ipc_ns);
	WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
	return err;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
			" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
			" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
			IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(&shp->shm_perm);
	kvfree(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

static vm_fault_t shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

static int shm_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->split)
		return sfd->vm_ops->split(vma, addr);

	return 0;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.split	= shm_split,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(&shp->shm_perm);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = get_pid(task_tgid(current));
	shp->shm_lprid = NULL;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = security_shm_associate,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	return ksys_shmget(key, size, shmflg);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid64_ds *shmid64)
{
	struct kern_ipc_perm *ipcp;
	struct shmid_kernel *shp;
	int err;

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64->shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}

static int shmctl_ipc_info(struct ipc_namespace *ns,
			   struct shminfo64 *shminfo)
{
	int err = security_shm_shmctl(NULL, IPC_INFO);
	if (!err) {
		memset(shminfo, 0, sizeof(*shminfo));
		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
		shminfo->shmmax = ns->shm_ctlmax;
		shminfo->shmall = ns->shm_ctlall;
		shminfo->shmmin = SHMMIN;
		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (err < 0)
			err = 0;
	}
	return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
			   struct shm_info *shm_info)
{
	int err = security_shm_shmctl(NULL, SHM_INFO);
	if (!err) {
		memset(shm_info, 0, sizeof(*shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info->used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
		shm_info->shm_tot = ns->shm_tot;
		shm_info->swap_attempts = 0;
		shm_info->swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (err < 0)
			err = 0;
	}
	return err;
}

static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			int cmd, struct shmid64_ds *tbuf)
{
	struct shmid_kernel *shp;
	int id = 0;
	int err;

	memset(tbuf, 0, sizeof(*tbuf));

	rcu_read_lock();
	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
		shp = shm_obtain_object(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
		id = shp->shm_perm.id;
	} else { /* IPC_STAT */
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
	}

	/*
	 * Semantically SHM_STAT_ANY ought to be identical to
	 * that functionality provided by the /proc/sysvipc/
	 * interface. As such, only audit these calls and
	 * do not do traditional S_IRUGO permission checks on
	 * the ipc object.
	 */
	if (cmd == SHM_STAT_ANY)
		audit_ipc_obj(&shp->shm_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
	tbuf->shm_segsz	= shp->shm_segsz;
	tbuf->shm_atime	= shp->shm_atim;
	tbuf->shm_dtime	= shp->shm_dtim;
	tbuf->shm_ctime	= shp->shm_ctim;
#ifndef CONFIG_64BIT
	tbuf->shm_atime_high = shp->shm_atim >> 32;
	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
	tbuf->shm_nattch = shp->shm_nattch;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return id;

out_unlock:
	rcu_read_unlock();
	return err;
}

static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
	struct shmid_kernel *shp;
	struct file *shm_file;
	int err;

	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock1;
	}

	audit_ipc_obj(&(shp->shm_perm));
	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		err = -EIDRM;
		goto out_unlock0;
	}

	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
		kuid_t euid = current_euid();

		if (!uid_eq(euid, shp->shm_perm.uid) &&
		    !uid_eq(euid, shp->shm_perm.cuid)) {
			err = -EPERM;
			goto out_unlock0;
		}
		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
			err = -EPERM;
			goto out_unlock0;
		}
	}

	shm_file = shp->shm_file;
	if (is_file_hugepages(shm_file))
		goto out_unlock0;

	if (cmd == SHM_LOCK) {
		struct user_struct *user = current_user();

		err = shmem_lock(shm_file, 1, user);
		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
			shp->shm_perm.mode |= SHM_LOCKED;
			shp->mlock_user = user;
		}
		goto out_unlock0;
	}

	/* SHM_UNLOCK */
	if (!(shp->shm_perm.mode & SHM_LOCKED))
		goto out_unlock0;
	shmem_lock(shm_file, 0, shp->mlock_user);
	shp->shm_perm.mode &= ~SHM_LOCKED;
	shp->mlock_user = NULL;
	get_file(shm_file);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	shmem_unlock_mapping(shm_file->f_mapping);

	fput(shm_file);
	return err;

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	int err, version;
	struct ipc_namespace *ns;
	struct shmid64_ds sem64;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO: {
		struct shminfo64 shminfo;
		err = shmctl_ipc_info(ns, &shminfo);
		if (err < 0)
			return err;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			err = -EFAULT;
		return err;
	}
	case SHM_INFO: {
		struct shm_info shm_info;
		err = shmctl_shm_info(ns, &shm_info);
		if (err < 0)
			return err;
		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
			err = -EFAULT;
		return err;
	}
	case SHM_STAT:
	case SHM_STAT_ANY:
	case IPC_STAT: {
		err = shmctl_stat(ns, shmid, cmd, &sem64);
		if (err < 0)
			return err;
		if (copy_shmid_to_user(buf, &sem64, version))
			err = -EFAULT;
		return err;
	}
	case IPC_SET:
		if (copy_shmid_from_user(&sem64, buf, version))
			return -EFAULT;
		/* fallthru */
	case IPC_RMID:
		return shmctl_down(ns, shmid, cmd, &sem64);
	case SHM_LOCK:
	case SHM_UNLOCK:
		return shmctl_do_lock(ns, shmid, cmd);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	return ksys_shmctl(shmid, cmd, buf);
}

#ifdef CONFIG_COMPAT

struct compat_shmid_ds {
	struct compat_ipc_perm shm_perm;
	int shm_segsz;
	compat_time_t shm_atime;
	compat_time_t shm_dtime;
	compat_time_t shm_ctime;
	compat_ipc_pid_t shm_cpid;
	compat_ipc_pid_t shm_lpid;
	unsigned short shm_nattch;
	unsigned short shm_unused;
	compat_uptr_t shm_unused2;
	compat_uptr_t shm_unused3;
};

struct compat_shminfo64 {
	compat_ulong_t shmmax;
	compat_ulong_t shmmin;
	compat_ulong_t shmmni;
	compat_ulong_t shmseg;
	compat_ulong_t shmall;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

struct compat_shm_info {
	compat_int_t used_ids;
	compat_ulong_t shm_tot, shm_rss, shm_swp;
	compat_ulong_t swap_attempts, swap_successes;
};

static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
					int version)
{
	if (in->shmmax > INT_MAX)
		in->shmmax = INT_MAX;
	if (version == IPC_64) {
		struct compat_shminfo64 info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	} else {
		struct shminfo info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	}
}

static int put_compat_shm_info(struct shm_info *ip,
				struct compat_shm_info __user *uip)
{
	struct compat_shm_info info;

	memset(&info, 0, sizeof(info));
	info.used_ids = ip->used_ids;
	info.shm_tot = ip->shm_tot;
	info.shm_rss = ip->shm_rss;
	info.shm_swp = ip->shm_swp;
	info.swap_attempts = ip->swap_attempts;
	info.swap_successes = ip->swap_successes;
	return copy_to_user(uip, &info, sizeof(info));
}

static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_shmid64_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
		v.shm_atime	 = lower_32_bits(in->shm_atime);
		v.shm_atime_high = upper_32_bits(in->shm_atime);
		v.shm_dtime	 = lower_32_bits(in->shm_dtime);
		v.shm_dtime_high = upper_32_bits(in->shm_dtime);
		v.shm_ctime	 = lower_32_bits(in->shm_ctime);
		v.shm_ctime_high = upper_32_bits(in->shm_ctime);
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_shmid_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
		v.shm_perm.key = in->shm_perm.key;
		v.shm_atime = in->shm_atime;
		v.shm_dtime = in->shm_dtime;
		v.shm_ctime = in->shm_ctime;
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	}
}

static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_shmid64_ds __user *p = buf;
		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
	} else {
		struct compat_shmid_ds __user *p = buf;
		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
	}
}

long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
Al Viro553f7702017-07-08 22:52:47 -04001282{
1283 struct ipc_namespace *ns;
1284 struct shmid64_ds sem64;
1285 int version = compat_ipc_parse_version(&cmd);
1286 int err;
1287
1288 ns = current->nsproxy->ipc_ns;
1289
1290 if (cmd < 0 || shmid < 0)
1291 return -EINVAL;
1292
1293 switch (cmd) {
1294 case IPC_INFO: {
1295 struct shminfo64 shminfo;
1296 err = shmctl_ipc_info(ns, &shminfo);
1297 if (err < 0)
1298 return err;
1299 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1300 err = -EFAULT;
1301 return err;
1302 }
1303 case SHM_INFO: {
1304 struct shm_info shm_info;
1305 err = shmctl_shm_info(ns, &shm_info);
1306 if (err < 0)
1307 return err;
1308 if (put_compat_shm_info(&shm_info, uptr))
1309 err = -EFAULT;
1310 return err;
1311 }
1312 case IPC_STAT:
Davidlohr Buesoc21a6972018-04-10 16:35:23 -07001313 case SHM_STAT_ANY:
Al Viro553f7702017-07-08 22:52:47 -04001314 case SHM_STAT:
1315 err = shmctl_stat(ns, shmid, cmd, &sem64);
1316 if (err < 0)
1317 return err;
Will Deacon58aff0a2017-09-18 17:47:38 +01001318 if (copy_compat_shmid_to_user(uptr, &sem64, version))
Al Viro553f7702017-07-08 22:52:47 -04001319 err = -EFAULT;
1320 return err;
1321
1322 case IPC_SET:
1323 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1324 return -EFAULT;
1325 /* fallthru */
1326 case IPC_RMID:
1327 return shmctl_down(ns, shmid, cmd, &sem64);
1328 case SHM_LOCK:
1329 case SHM_UNLOCK:
1330 return shmctl_do_lock(ns, shmid, cmd);
1332 default:
1333 return -EINVAL;
1334 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 return err;
1336}
Dominik Brodowskic84d0792018-03-20 20:12:33 +01001337
1338COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1339{
1340 return compat_ksys_shmctl(shmid, cmd, uptr);
1341}
Al Viro553f7702017-07-08 22:52:47 -04001342#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
1344/*
1345 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1346 *
1347 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1348 * "raddr" thing points to kernel space, and there has to be a wrapper around
1349 * this.
1350 */
Davidlohr Bueso95e91b82017-02-27 14:28:24 -08001351long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1352 ulong *raddr, unsigned long shmlba)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353{
1354 struct shmid_kernel *shp;
Davidlohr Buesof0cb8802017-05-08 15:57:03 -07001355 unsigned long addr = (unsigned long)shmaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 unsigned long size;
Manfred Spraul239521f2014-01-27 17:07:04 -08001357 struct file *file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 int err;
Davidlohr Buesof0cb8802017-05-08 15:57:03 -07001359 unsigned long flags = MAP_SHARED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 unsigned long prot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 int acc_mode;
Kirill Korotaev4e982312006-10-02 02:18:22 -07001362 struct ipc_namespace *ns;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001363 struct shm_file_data *sfd;
1364 struct path path;
Al Viroaeb5d722008-09-02 15:28:45 -04001365 fmode_t f_mode;
Michel Lespinasse41badc12013-02-22 16:32:47 -08001366 unsigned long populate = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001368 err = -EINVAL;
1369 if (shmid < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 goto out;
Davidlohr Buesof0cb8802017-05-08 15:57:03 -07001371
1372 if (addr) {
Will Deacon079a96a2012-07-30 14:42:38 -07001373 if (addr & (shmlba - 1)) {
Davidlohr Bueso8f89c002018-05-25 14:47:30 -07001374 if (shmflg & SHM_RND) {
Davidlohr Buesoa73ab242018-05-25 14:47:27 -07001375 addr &= ~(shmlba - 1); /* round down */
Davidlohr Bueso8f89c002018-05-25 14:47:30 -07001376
1377 /*
 1378 * Ensure that the rounded-down address is
 1379 * non-zero when remapping; the result can
 1380 * be zero when addr < shmlba.
1381 */
1382 if (!addr && (shmflg & SHM_REMAP))
1383 goto out;
1384 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385#ifndef __ARCH_FORCE_SHMLBA
1386 if (addr & ~PAGE_MASK)
1387#endif
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001388 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390
Davidlohr Buesof0cb8802017-05-08 15:57:03 -07001391 flags |= MAP_FIXED;
1392 } else if ((shmflg & SHM_REMAP))
1393 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
1395 if (shmflg & SHM_RDONLY) {
1396 prot = PROT_READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 acc_mode = S_IRUGO;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001398 f_mode = FMODE_READ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 } else {
1400 prot = PROT_READ | PROT_WRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 acc_mode = S_IRUGO | S_IWUGO;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001402 f_mode = FMODE_READ | FMODE_WRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 }
1404 if (shmflg & SHM_EXEC) {
1405 prot |= PROT_EXEC;
1406 acc_mode |= S_IXUGO;
1407 }
1408
1409 /*
1410 * We cannot rely on the fs check since SYSV IPC does have an
1411 * additional creator id...
1412 */
Kirill Korotaev4e982312006-10-02 02:18:22 -07001413 ns = current->nsproxy->ipc_ns;
Davidlohr Buesoc2c737a2013-09-11 14:26:23 -07001414 rcu_read_lock();
1415 shp = shm_obtain_object_check(ns, shmid);
Nadia Derbey023a5352007-10-18 23:40:51 -07001416 if (IS_ERR(shp)) {
1417 err = PTR_ERR(shp);
Davidlohr Buesoc2c737a2013-09-11 14:26:23 -07001418 goto out_unlock;
Nadia Derbey023a5352007-10-18 23:40:51 -07001419 }
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001420
1421 err = -EACCES;
Serge E. Hallynb0e77592011-03-23 16:43:24 -07001422 if (ipcperms(ns, &shp->shm_perm, acc_mode))
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001423 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
Eric W. Biederman7191adf2018-03-22 21:08:27 -05001425 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001426 if (err)
1427 goto out_unlock;
1428
Davidlohr Buesoc2c737a2013-09-11 14:26:23 -07001429 ipc_lock_object(&shp->shm_perm);
Greg Thelena399b292013-11-21 14:32:00 -08001430
1431 /* check if shm_destroy() is tearing down shp */
Rafael Aquini0f3d2b02014-01-27 17:07:01 -08001432 if (!ipc_valid_object(&shp->shm_perm)) {
Greg Thelena399b292013-11-21 14:32:00 -08001433 ipc_unlock_object(&shp->shm_perm);
1434 err = -EIDRM;
1435 goto out_unlock;
1436 }
1437
Al Viro2c48b9c2009-08-09 00:52:35 +04001438 path = shp->shm_file->f_path;
1439 path_get(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 shp->shm_nattch++;
David Howells75c3cfa2015-03-17 22:26:12 +00001441 size = i_size_read(d_inode(path.dentry));
Davidlohr Buesoc2c737a2013-09-11 14:26:23 -07001442 ipc_unlock_object(&shp->shm_perm);
1443 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001445 err = -ENOMEM;
1446 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
Davidlohr Buesof42569b2013-09-11 14:26:22 -07001447 if (!sfd) {
1448 path_put(&path);
1449 goto out_nattch;
1450 }
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001451
Al Viro2c48b9c2009-08-09 00:52:35 +04001452 file = alloc_file(&path, f_mode,
1453 is_file_hugepages(shp->shm_file) ?
Al Viroc4caa772009-11-30 08:38:43 -05001454 &shm_file_operations_huge :
1455 &shm_file_operations);
Anatol Pomozov39b65252012-09-12 20:11:55 -07001456 err = PTR_ERR(file);
Davidlohr Buesof42569b2013-09-11 14:26:22 -07001457 if (IS_ERR(file)) {
1458 kfree(sfd);
1459 path_put(&path);
1460 goto out_nattch;
1461 }
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001462
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001463 file->private_data = sfd;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001464 file->f_mapping = shp->shm_file->f_mapping;
Nadia Derbey7ca7e562007-10-18 23:40:48 -07001465 sfd->id = shp->shm_perm.id;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001466 sfd->ns = get_ipc_ns(ns);
Eric Biggers3f053172018-04-13 15:35:30 -07001467 /*
1468 * We need to take a reference to the real shm file to prevent the
1469 * pointer from becoming stale in cases where the lifetime of the outer
1470 * file extends beyond that of the shm segment. It's not usually
1471 * possible, but it can happen during remap_file_pages() emulation as
1472 * that unmaps the memory, then does ->mmap() via file reference only.
1473 * We'll deny the ->mmap() if the shm segment was since removed, but to
1474 * detect shm ID reuse we need to compare the file pointers.
1475 */
1476 sfd->file = get_file(shp->shm_file);
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001477 sfd->vm_ops = NULL;
1478
Al Viro8b3ec682012-05-30 17:11:23 -04001479 err = security_mmap_file(file, prot, flags);
1480 if (err)
1481 goto out_fput;
1482
Michal Hocko91f4f942016-05-23 16:25:51 -07001483 if (down_write_killable(&current->mm->mmap_sem)) {
1484 err = -EINTR;
1485 goto out_fput;
1486 }
1487
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 if (addr && !(shmflg & SHM_REMAP)) {
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001489 err = -EINVAL;
Manfred Spraul247a8ce2014-06-06 14:37:38 -07001490 if (addr + size < addr)
1491 goto invalid;
1492
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 if (find_vma_intersection(current->mm, addr, addr + size))
1494 goto invalid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 }
Davidlohr Buesof42569b2013-09-11 14:26:22 -07001496
Mike Rapoport897ab3e2017-02-24 14:58:22 -08001497 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001498 *raddr = addr;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001499 err = 0;
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001500 if (IS_ERR_VALUE(addr))
1501 err = (long)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502invalid:
1503 up_write(&current->mm->mmap_sem);
Michel Lespinassebebeb3d2013-02-22 16:32:37 -08001504 if (populate)
Michel Lespinasse41badc12013-02-22 16:32:47 -08001505 mm_populate(addr, populate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
Al Viro8b3ec682012-05-30 17:11:23 -04001507out_fput:
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001508 fput(file);
1509
1510out_nattch:
Davidlohr Buesod9a605e2013-09-11 14:26:24 -07001511 down_write(&shm_ids(ns).rwsem);
Nadia Derbey00c2bf82008-07-25 01:48:03 -07001512 shp = shm_lock(ns, shmid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 shp->shm_nattch--;
Vasiliy Kulikovb34a6b12011-07-26 16:08:48 -07001514 if (shm_may_destroy(ns, shp))
Kirill Korotaev4e982312006-10-02 02:18:22 -07001515 shm_destroy(ns, shp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 else
1517 shm_unlock(shp);
Davidlohr Buesod9a605e2013-09-11 14:26:24 -07001518 up_write(&shm_ids(ns).rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 return err;
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001520
1521out_unlock:
Davidlohr Buesoc2c737a2013-09-11 14:26:23 -07001522 rcu_read_unlock();
Davidlohr Buesof42569b2013-09-11 14:26:22 -07001523out:
1524 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525}
1526
Heiko Carstensd5460c92009-01-14 14:14:27 +01001527SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
Stephen Rothwell7d87e14c2005-05-01 08:59:12 -07001528{
1529 unsigned long ret;
1530 long err;
1531
Will Deacon079a96a2012-07-30 14:42:38 -07001532 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
Stephen Rothwell7d87e14c2005-05-01 08:59:12 -07001533 if (err)
1534 return err;
1535 force_successful_syscall_return();
1536 return (long)ret;
1537}
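/*
 * Illustrative user-space sketch, not kernel code: the attach/detach cycle
 * served by do_shmat() and ksys_shmdt().  Passing a NULL address lets the
 * kernel choose a suitably aligned mapping; an explicit misaligned address
 * is rejected unless SHM_RND asks for the round-down handled above.  Error
 * handling is trimmed.
 *
 *	#include <string.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	static int shm_demo(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *		void *p;
 *
 *		if (id < 0)
 *			return -1;
 *		p = shmat(id, NULL, 0);			// kernel picks the address
 *		if (p != (void *)-1) {
 *			memcpy(p, "hello", 6);		// mapping is MAP_SHARED
 *			shmdt(p);			// drops shm_nattch via shm_close()
 *		}
 *		return shmctl(id, IPC_RMID, NULL);	// destroyed once nattch is 0
 *	}
 */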
1538
Al Viroa78ee9e2017-07-09 10:38:28 -04001539#ifdef CONFIG_COMPAT
1540
1541#ifndef COMPAT_SHMLBA
1542#define COMPAT_SHMLBA SHMLBA
1543#endif
1544
1545COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1546{
1547 unsigned long ret;
1548 long err;
1549
1550 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1551 if (err)
1552 return err;
1553 force_successful_syscall_return();
1554 return (long)ret;
1555}
1556#endif
1557
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558/*
1559 * detach and kill segment if marked destroyed.
1560 * The work is done in shm_close.
1561 */
Dominik Brodowskida1e27442018-03-20 20:09:48 +01001562long ksys_shmdt(char __user *shmaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
1564 struct mm_struct *mm = current->mm;
Mike Frysinger586c7e62009-06-09 16:26:23 -07001565 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 unsigned long addr = (unsigned long)shmaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 int retval = -EINVAL;
Mike Frysinger586c7e62009-06-09 16:26:23 -07001568#ifdef CONFIG_MMU
1569 loff_t size = 0;
Dave Hansend3c97902014-12-12 16:58:19 -08001570 struct file *file;
Mike Frysinger586c7e62009-06-09 16:26:23 -07001571 struct vm_area_struct *next;
1572#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Hugh Dickinsdf1e2fb2006-03-24 03:18:06 -08001574 if (addr & ~PAGE_MASK)
1575 return retval;
1576
Michal Hocko91f4f942016-05-23 16:25:51 -07001577 if (down_write_killable(&mm->mmap_sem))
1578 return -EINTR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
1580 /*
1581 * This function tries to be smart and unmap shm segments that
1582 * were modified by partial mlock or munmap calls:
1583 * - It first determines the size of the shm segment that should be
1584 * unmapped: It searches for a vma that is backed by shm and that
 1585 * started at address shmaddr. It records its size and then unmaps
1586 * it.
1587 * - Then it unmaps all shm vmas that started at shmaddr and that
Dave Hansend3c97902014-12-12 16:58:19 -08001588 * are within the initially determined size and that are from the
1589 * same shm segment from which we determined the size.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 * Errors from do_munmap are ignored: the function only fails if
1591 * it's called with invalid parameters or if it's called to unmap
1592 * a part of a vma. Both calls in this function are for full vmas,
1593 * the parameters are directly copied from the vma itself and always
1594 * valid - therefore do_munmap cannot fail. (famous last words?)
1595 */
1596 /*
1597 * If it had been mremap()'d, the starting address would not
1598 * match the usual checks anyway. So assume all vma's are
1599 * above the starting address given.
1600 */
1601 vma = find_vma(mm, addr);
1602
David Howells8feae132009-01-08 12:04:47 +00001603#ifdef CONFIG_MMU
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 while (vma) {
1605 next = vma->vm_next;
1606
1607 /*
1608 * Check if the starting address would match, i.e. it's
1609 * a fragment created by mprotect() and/or munmap(), or it
 1610 * otherwise starts at this address with no hassles.
1611 */
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001612 if ((vma->vm_ops == &shm_vm_ops) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1614
Dave Hansend3c97902014-12-12 16:58:19 -08001615 /*
1616 * Record the file of the shm segment being
1617 * unmapped. With mremap(), someone could place
1618 * page from another segment but with equal offsets
1619 * in the range we are unmapping.
1620 */
1621 file = vma->vm_file;
Dave Hansen07a46ed2014-12-12 16:58:22 -08001622 size = i_size_read(file_inode(vma->vm_file));
Mike Rapoport897ab3e2017-02-24 14:58:22 -08001623 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 /*
1625 * We discovered the size of the shm segment, so
1626 * break out of here and fall through to the next
1627 * loop that uses the size information to stop
1628 * searching for matching vma's.
1629 */
1630 retval = 0;
1631 vma = next;
1632 break;
1633 }
1634 vma = next;
1635 }
1636
1637 /*
1638 * We need look no further than the maximum address a fragment
1639 * could possibly have landed at. Also cast things to loff_t to
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001640 * prevent overflows and make comparisons vs. equal-width types.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 */
KAMEZAWA Hiroyuki8e367092006-02-10 01:51:12 -08001642 size = PAGE_ALIGN(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1644 next = vma->vm_next;
1645
1646 /* finding a matching vma now does not alter retval */
Eric W. Biedermanbc56bba2007-02-20 13:57:53 -08001647 if ((vma->vm_ops == &shm_vm_ops) &&
Dave Hansend3c97902014-12-12 16:58:19 -08001648 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1649 (vma->vm_file == file))
Mike Rapoport897ab3e2017-02-24 14:58:22 -08001650 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 vma = next;
1652 }
1653
Shailesh Pandey63980c82016-12-14 15:06:10 -08001654#else /* CONFIG_MMU */
David Howells8feae132009-01-08 12:04:47 +00001655 /* under NOMMU conditions, the exact address to be destroyed must be
Shailesh Pandey63980c82016-12-14 15:06:10 -08001656 * given
1657 */
Davidlohr Bueso530fcd162013-09-11 14:26:28 -07001658 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
Mike Rapoport897ab3e2017-02-24 14:58:22 -08001659 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
David Howells8feae132009-01-08 12:04:47 +00001660 retval = 0;
1661 }
1662
1663#endif
1664
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 up_write(&mm->mmap_sem);
1666 return retval;
1667}
1668
Dominik Brodowskida1e27442018-03-20 20:09:48 +01001669SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1670{
1671 return ksys_shmdt(shmaddr);
1672}
1673
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674#ifdef CONFIG_PROC_FS
Mike Waychison19b49462005-09-06 15:17:10 -07001675static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676{
Eric W. Biederman98f929b2018-03-23 00:29:57 -05001677 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
Eric W. Biederman1efdb692012-02-07 16:54:11 -08001678 struct user_namespace *user_ns = seq_user_ns(s);
Kees Cookade9f912017-08-02 13:32:21 -07001679 struct kern_ipc_perm *ipcp = it;
1680 struct shmid_kernel *shp;
Helge Dellerb7952182010-10-27 15:34:16 -07001681 unsigned long rss = 0, swp = 0;
1682
Kees Cookade9f912017-08-02 13:32:21 -07001683 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
Helge Dellerb7952182010-10-27 15:34:16 -07001684 shm_add_rss_swap(shp, &rss, &swp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
Paul Menage6c826812008-06-12 15:21:49 -07001686#if BITS_PER_LONG <= 32
1687#define SIZE_SPEC "%10lu"
1688#else
1689#define SIZE_SPEC "%21lu"
1690#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
Joe Perches7f032d62015-04-15 16:17:54 -07001692 seq_printf(s,
1693 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
Deepa Dinamani7ff28192017-08-02 19:51:14 -07001694 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
Joe Perches7f032d62015-04-15 16:17:54 -07001695 SIZE_SPEC " " SIZE_SPEC "\n",
1696 shp->shm_perm.key,
1697 shp->shm_perm.id,
1698 shp->shm_perm.mode,
1699 shp->shm_segsz,
Eric W. Biederman98f929b2018-03-23 00:29:57 -05001700 pid_nr_ns(shp->shm_cprid, pid_ns),
1701 pid_nr_ns(shp->shm_lprid, pid_ns),
Joe Perches7f032d62015-04-15 16:17:54 -07001702 shp->shm_nattch,
1703 from_kuid_munged(user_ns, shp->shm_perm.uid),
1704 from_kgid_munged(user_ns, shp->shm_perm.gid),
1705 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1706 from_kgid_munged(user_ns, shp->shm_perm.cgid),
1707 shp->shm_atim,
1708 shp->shm_dtim,
1709 shp->shm_ctim,
1710 rss * PAGE_SIZE,
1711 swp * PAGE_SIZE);
1712
1713 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714}
1715#endif
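/*
 * Illustrative note, not kernel code: each record printed above becomes one
 * line of /proc/sysvipc/shm, with the fields in the same order as the column
 * header this file registers via ipc_init_proc_interface() (key, shmid,
 * perms, size, cpid, lpid, nattch, uid, gid, cuid, cgid, atime, dtime,
 * ctime, rss, swap).  A minimal reader is simply:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[512];
 *		FILE *f = fopen("/proc/sysvipc/shm", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// header, then one line per segment
 *		fclose(f);
 *		return 0;
 *	}
 */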