// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm shm_perm;
	struct file *shm_file;
	unsigned long shm_nattch;
	unsigned long shm_segsz;
	time64_t shm_atim;
	time64_t shm_dtim;
	time64_t shm_ctim;
	struct pid *shm_cprid;
	struct pid *shm_lprid;
	struct user_struct *mlock_user;

	/* The task that created the shm object; NULL if the task is dead. */
	struct task_struct *shm_creator;
	struct list_head shm_clist;	/* list by creator */
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000	/* segment will not be swapped */

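/*
 * Per-attach bookkeeping, kept in file->private_data (see the
 * shm_file_data() accessor below): the segment id and namespace, the
 * backing shm file, and that file's vm_ops so mapping operations can be
 * forwarded to the underlying shmem/hugetlbfs file.
 */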
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

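/*
 * Look up a segment by id under rcu_read_lock(), without taking the ipc
 * lock and without verifying the id's sequence number.
 */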
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

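/*
 * As shm_obtain_object(), but also check that the id's sequence number
 * still matches, i.e. that the identifier has not been deleted and reused.
 */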
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp;

	rcu_read_lock();
	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
	if (IS_ERR(ipcp))
		goto err;

	ipc_lock_object(ipcp);
	/*
	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
	 * was spinning: here verify that the structure is still valid.
	 * Upon races with RMID, return -EIDRM, thus indicating that
	 * the ID points to a removed identifier.
	 */
	if (ipc_valid_object(ipcp)) {
		/* return a locked ipc object upon success */
		return container_of(ipcp, struct shmid_kernel, shm_perm);
	}

	ipc_unlock_object(ipcp);
	ipcp = ERR_PTR(-EIDRM);
err:
	rcu_read_unlock();
	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer and error out as appropriate.
	 */
	return ERR_CAST(ipcp);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

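/* RCU callback: free the security blob, then the shmid_kernel itself. */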
static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(&shp->shm_perm);
	kvfree(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

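/*
 * Bump the attach count of the segment mapped by @vma and record the
 * attach time and last pid.  Returns 0, or a negative errno if the id
 * was removed or reused in the meantime.
 */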
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

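/*
 * Page fault handler for shm mappings: forward the fault to the backing
 * (shmem or hugetlbfs) file's vm_ops.
 */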
static vm_fault_t shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

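/* Forward a vma split request to the backing file's handler, if it has one. */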
static int shm_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->split)
		return sfd->vm_ops->split(vma, addr);

	return 0;
}

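/*
 * Report the page size used by the backing mapping (e.g. the huge page
 * size for hugetlb segments), defaulting to PAGE_SIZE.
 */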
static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->pagesize)
		return sfd->vm_ops->pagesize(vma);

	return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open = shm_open,	/* callback for a new vm-area open */
	.close = shm_close,	/* callback for when the vm-area is released */
	.fault = shm_fault,
	.split = shm_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(&shp->shm_perm);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = get_pid(task_tgid(current));
	shp->shm_lprid = NULL;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
	return error;
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = security_shm_associate,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	return ksys_shmget(key, size, shmflg);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid = tbuf_old.shm_perm.uid;
		out->shm_perm.gid = tbuf_old.shm_perm.gid;
		out->shm_perm.mode = tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
			     unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

| 857 | /* |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 858 | * Called with shm_ids.rwsem held as a reader |
Nadia Derbey | f4566f0 | 2007-10-18 23:40:53 -0700 | [diff] [blame] | 859 | */ |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 860 | static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, |
| 861 | unsigned long *swp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 862 | { |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 863 | int next_id; |
| 864 | int total, in_use; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 | |
| 866 | *rss = 0; |
| 867 | *swp = 0; |
| 868 | |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 869 | in_use = shm_ids(ns).in_use; |
| 870 | |
| 871 | for (total = 0, next_id = 0; total < in_use; next_id++) { |
Tony Battersby | e562aeb | 2009-04-02 16:58:26 -0700 | [diff] [blame] | 872 | struct kern_ipc_perm *ipc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 873 | struct shmid_kernel *shp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | |
Tony Battersby | e562aeb | 2009-04-02 16:58:26 -0700 | [diff] [blame] | 875 | ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); |
| 876 | if (ipc == NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 | continue; |
Tony Battersby | e562aeb | 2009-04-02 16:58:26 -0700 | [diff] [blame] | 878 | shp = container_of(ipc, struct shmid_kernel, shm_perm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 879 | |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 880 | shm_add_rss_swap(shp, rss, swp); |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 881 | |
| 882 | total++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 883 | } |
| 884 | } |
| 885 | |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 886 | /* |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 887 | * This function handles some shmctl commands which require the rwsem |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 888 | * to be held in write mode. |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 889 |  * NOTE: no locks may be held on entry; the rwsem is taken inside this function.
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 890 | */ |
| 891 | static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 892 | struct shmid64_ds *shmid64) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | { |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 894 | struct kern_ipc_perm *ipcp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | struct shmid_kernel *shp; |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 896 | int err; |
| 897 | |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 898 | down_write(&shm_ids(ns).rwsem); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 899 | rcu_read_lock(); |
| 900 | |
Manfred Spraul | 4241c1a | 2018-08-21 22:01:34 -0700 | [diff] [blame] | 901 | ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd, |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 902 | &shmid64->shm_perm, 0); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 903 | if (IS_ERR(ipcp)) { |
| 904 | err = PTR_ERR(ipcp); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 905 | goto out_unlock1; |
| 906 | } |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 907 | |
Pierre Peiffer | a5f75e7 | 2008-04-29 01:00:54 -0700 | [diff] [blame] | 908 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 909 | |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 910 | err = security_shm_shmctl(&shp->shm_perm, cmd); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 911 | if (err) |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 912 | goto out_unlock1; |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 913 | |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 914 | switch (cmd) { |
| 915 | case IPC_RMID: |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 916 | ipc_lock_object(&shp->shm_perm); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 917 | /* do_shm_rmid unlocks the ipc object and rcu */ |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 918 | do_shm_rmid(ns, ipcp); |
| 919 | goto out_up; |
| 920 | case IPC_SET: |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 921 | ipc_lock_object(&shp->shm_perm); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 922 | err = ipc_update_perm(&shmid64->shm_perm, ipcp); |
Eric W. Biederman | 1efdb69 | 2012-02-07 16:54:11 -0800 | [diff] [blame] | 923 | if (err) |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 924 | goto out_unlock0; |
Deepa Dinamani | 7ff2819 | 2017-08-02 19:51:14 -0700 | [diff] [blame] | 925 | shp->shm_ctim = ktime_get_real_seconds(); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 926 | break; |
| 927 | default: |
| 928 | err = -EINVAL; |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 929 | goto out_unlock1; |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 930 | } |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 931 | |
| 932 | out_unlock0: |
| 933 | ipc_unlock_object(&shp->shm_perm); |
| 934 | out_unlock1: |
| 935 | rcu_read_unlock(); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 936 | out_up: |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 937 | up_write(&shm_ids(ns).rwsem); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 938 | return err; |
| 939 | } |
| 940 | |
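/*
 * Usage sketch (userspace, illustrative; "id" is assumed to come from an
 * earlier shmget() and error handling is omitted) of the two commands
 * served by shmctl_down() above:
 *
 *	struct shmid_ds ds;
 *
 *	shmctl(id, IPC_STAT, &ds);	// fetch current shm_perm
 *	ds.shm_perm.mode = 0600;	// e.g. tighten the permissions
 *	shmctl(id, IPC_SET, &ds);	// updates shm_perm and shm_ctime
 *	shmctl(id, IPC_RMID, NULL);	// destroyed once the last attach goes away
 */
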
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 941 | static int shmctl_ipc_info(struct ipc_namespace *ns, |
| 942 | struct shminfo64 *shminfo) |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 943 | { |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 944 | int err = security_shm_shmctl(NULL, IPC_INFO); |
| 945 | if (!err) { |
| 946 | memset(shminfo, 0, sizeof(*shminfo)); |
| 947 | shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni; |
| 948 | shminfo->shmmax = ns->shm_ctlmax; |
| 949 | shminfo->shmall = ns->shm_ctlall; |
| 950 | shminfo->shmmin = SHMMIN; |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 951 | down_read(&shm_ids(ns).rwsem); |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 952 | err = ipc_get_maxidx(&shm_ids(ns)); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 953 | up_read(&shm_ids(ns).rwsem); |
Manfred Spraul | 239521f | 2014-01-27 17:07:04 -0800 | [diff] [blame] | 954 | if (err < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | err = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 957 | return err; |
| 958 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 960 | static int shmctl_shm_info(struct ipc_namespace *ns, |
| 961 | struct shm_info *shm_info) |
| 962 | { |
| 963 | int err = security_shm_shmctl(NULL, SHM_INFO); |
| 964 | if (!err) { |
| 965 | memset(shm_info, 0, sizeof(*shm_info)); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 966 | down_read(&shm_ids(ns).rwsem); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 967 | shm_info->used_ids = shm_ids(ns).in_use; |
| 968 | shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp); |
| 969 | shm_info->shm_tot = ns->shm_tot; |
| 970 | shm_info->swap_attempts = 0; |
| 971 | shm_info->swap_successes = 0; |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 972 | err = ipc_get_maxidx(&shm_ids(ns)); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 973 | up_read(&shm_ids(ns).rwsem); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 974 | if (err < 0) |
| 975 | err = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 977 | return err; |
| 978 | } |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 979 | |
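/*
 * Userspace sketch of the two info commands above (illustrative; glibc's
 * struct shminfo and struct shm_info from <sys/shm.h> are assumed, error
 * handling omitted). Both return the highest in-use index on success,
 * which is what makes the SHM_STAT walk over shmctl_stat() below possible:
 *
 *	struct shminfo limits;
 *	struct shm_info usage;
 *	int maxidx;
 *
 *	shmctl(0, IPC_INFO, (struct shmid_ds *)&limits);		// ns limits
 *	maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&usage);	// current usage
 */
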
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 980 | static int shmctl_stat(struct ipc_namespace *ns, int shmid, |
| 981 | int cmd, struct shmid64_ds *tbuf) |
| 982 | { |
| 983 | struct shmid_kernel *shp; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 984 | int err; |
Davidlohr Bueso | c97cb9c | 2013-09-11 14:26:20 -0700 | [diff] [blame] | 985 | |
Philippe Mikoyan | 87ad4b0 | 2018-02-06 15:40:49 -0800 | [diff] [blame] | 986 | memset(tbuf, 0, sizeof(*tbuf)); |
| 987 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 988 | rcu_read_lock(); |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 989 | if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) { |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 990 | shp = shm_obtain_object(ns, shmid); |
| 991 | if (IS_ERR(shp)) { |
| 992 | err = PTR_ERR(shp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | goto out_unlock; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 994 | } |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 995 | } else { /* IPC_STAT */ |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 996 | shp = shm_obtain_object_check(ns, shmid); |
| 997 | if (IS_ERR(shp)) { |
| 998 | err = PTR_ERR(shp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | goto out_unlock; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1000 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1001 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1002 | |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1003 | /* |
| 1004 |  * Semantically, SHM_STAT_ANY ought to be identical to the
| 1005 |  * functionality provided by the /proc/sysvipc/ interface.
| 1006 |  * As such, only audit these calls and
| 1007 | * do not do traditional S_IRUGO permission checks on |
| 1008 | * the ipc object. |
| 1009 | */ |
| 1010 | if (cmd == SHM_STAT_ANY) |
| 1011 | audit_ipc_obj(&shp->shm_perm); |
| 1012 | else { |
| 1013 | err = -EACCES; |
| 1014 | if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) |
| 1015 | goto out_unlock; |
| 1016 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1017 | |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1018 | err = security_shm_shmctl(&shp->shm_perm, cmd); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1019 | if (err) |
| 1020 | goto out_unlock; |
| 1021 | |
Philippe Mikoyan | 87ad4b0 | 2018-02-06 15:40:49 -0800 | [diff] [blame] | 1022 | ipc_lock_object(&shp->shm_perm); |
| 1023 | |
| 1024 | if (!ipc_valid_object(&shp->shm_perm)) { |
| 1025 | ipc_unlock_object(&shp->shm_perm); |
| 1026 | err = -EIDRM; |
| 1027 | goto out_unlock; |
| 1028 | } |
| 1029 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1030 | kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm); |
| 1031 | tbuf->shm_segsz = shp->shm_segsz; |
| 1032 | tbuf->shm_atime = shp->shm_atim; |
| 1033 | tbuf->shm_dtime = shp->shm_dtim; |
| 1034 | tbuf->shm_ctime = shp->shm_ctim; |
Arnd Bergmann | c2ab975 | 2015-04-28 21:39:50 +0200 | [diff] [blame] | 1035 | #ifndef CONFIG_64BIT |
| 1036 | tbuf->shm_atime_high = shp->shm_atim >> 32; |
| 1037 | tbuf->shm_dtime_high = shp->shm_dtim >> 32; |
| 1038 | tbuf->shm_ctime_high = shp->shm_ctim >> 32; |
| 1039 | #endif |
Eric W. Biederman | 98f929b | 2018-03-23 00:29:57 -0500 | [diff] [blame] | 1040 | tbuf->shm_cpid = pid_vnr(shp->shm_cprid); |
| 1041 | tbuf->shm_lpid = pid_vnr(shp->shm_lprid); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1042 | tbuf->shm_nattch = shp->shm_nattch; |
Philippe Mikoyan | 87ad4b0 | 2018-02-06 15:40:49 -0800 | [diff] [blame] | 1043 | |
Manfred Spraul | 615c999 | 2018-08-21 22:01:21 -0700 | [diff] [blame] | 1044 | if (cmd == IPC_STAT) { |
| 1045 | /* |
| 1046 | * As defined in SUS: |
| 1047 | * Return 0 on success |
| 1048 | */ |
| 1049 | err = 0; |
| 1050 | } else { |
| 1051 | /* |
| 1052 | * SHM_STAT and SHM_STAT_ANY (both Linux specific) |
| 1053 | * Return the full id, including the sequence number |
| 1054 | */ |
| 1055 | err = shp->shm_perm.id; |
| 1056 | } |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1057 | |
Manfred Spraul | 615c999 | 2018-08-21 22:01:21 -0700 | [diff] [blame] | 1058 | ipc_unlock_object(&shp->shm_perm); |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1059 | out_unlock: |
Davidlohr Bueso | c97cb9c | 2013-09-11 14:26:20 -0700 | [diff] [blame] | 1060 | rcu_read_unlock(); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1061 | return err; |
| 1062 | } |
| 1063 | |
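/*
 * The return-value convention implemented above: IPC_STAT takes a shmid
 * and returns 0, while SHM_STAT/SHM_STAT_ANY take an index into the idr
 * and return the full id including the sequence number. An illustrative
 * userspace walk over all segments ("maxidx" as obtained via SHM_INFO):
 *
 *	for (int i = 0; i <= maxidx; i++) {
 *		struct shmid_ds ds;
 *		int id = shmctl(i, SHM_STAT, &ds);
 *
 *		if (id < 0)
 *			continue;	// unused slot
 *		// "id" is a valid shmid, usable with IPC_STAT and friends
 *	}
 */
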
| 1064 | static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd) |
| 1065 | { |
| 1066 | struct shmid_kernel *shp; |
| 1067 | struct file *shm_file; |
| 1068 | int err; |
| 1069 | |
| 1070 | rcu_read_lock(); |
| 1071 | shp = shm_obtain_object_check(ns, shmid); |
| 1072 | if (IS_ERR(shp)) { |
| 1073 | err = PTR_ERR(shp); |
| 1074 | goto out_unlock1; |
| 1075 | } |
| 1076 | |
| 1077 | audit_ipc_obj(&(shp->shm_perm)); |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1078 | err = security_shm_shmctl(&shp->shm_perm, cmd); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1079 | if (err) |
| 1080 | goto out_unlock1; |
| 1081 | |
| 1082 | ipc_lock_object(&shp->shm_perm); |
| 1083 | |
| 1084 | /* check if shm_destroy() is tearing down shp */ |
| 1085 | if (!ipc_valid_object(&shp->shm_perm)) { |
| 1086 | err = -EIDRM; |
| 1087 | goto out_unlock0; |
| 1088 | } |
| 1089 | |
| 1090 | if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { |
| 1091 | kuid_t euid = current_euid(); |
| 1092 | |
| 1093 | if (!uid_eq(euid, shp->shm_perm.uid) && |
| 1094 | !uid_eq(euid, shp->shm_perm.cuid)) { |
| 1095 | err = -EPERM; |
| 1096 | goto out_unlock0; |
| 1097 | } |
| 1098 | if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { |
| 1099 | err = -EPERM; |
| 1100 | goto out_unlock0; |
| 1101 | } |
| 1102 | } |
| 1103 | |
| 1104 | shm_file = shp->shm_file; |
| 1105 | if (is_file_hugepages(shm_file)) |
| 1106 | goto out_unlock0; |
| 1107 | |
| 1108 | if (cmd == SHM_LOCK) { |
| 1109 | struct user_struct *user = current_user(); |
| 1110 | |
| 1111 | err = shmem_lock(shm_file, 1, user); |
| 1112 | if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { |
| 1113 | shp->shm_perm.mode |= SHM_LOCKED; |
| 1114 | shp->mlock_user = user; |
| 1115 | } |
| 1116 | goto out_unlock0; |
| 1117 | } |
| 1118 | |
| 1119 | /* SHM_UNLOCK */ |
| 1120 | if (!(shp->shm_perm.mode & SHM_LOCKED)) |
| 1121 | goto out_unlock0; |
| 1122 | shmem_lock(shm_file, 0, shp->mlock_user); |
| 1123 | shp->shm_perm.mode &= ~SHM_LOCKED; |
| 1124 | shp->mlock_user = NULL; |
| 1125 | get_file(shm_file); |
| 1126 | ipc_unlock_object(&shp->shm_perm); |
| 1127 | rcu_read_unlock(); |
| 1128 | shmem_unlock_mapping(shm_file->f_mapping); |
| 1129 | |
| 1130 | fput(shm_file); |
| 1131 | return err; |
| 1132 | |
| 1133 | out_unlock0: |
| 1134 | ipc_unlock_object(&shp->shm_perm); |
| 1135 | out_unlock1: |
| 1136 | rcu_read_unlock(); |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1137 | return err; |
| 1138 | } |
| 1139 | |
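/*
 * SHM_LOCK/SHM_UNLOCK sketch matching the checks above (illustrative):
 * without CAP_IPC_LOCK the caller must be the segment's owner or creator
 * and, for SHM_LOCK, have a non-zero RLIMIT_MEMLOCK; hugetlb-backed
 * segments are left untouched.
 *
 *	shmctl(id, SHM_LOCK, NULL);	// pages become exempt from swap-out
 *	...
 *	shmctl(id, SHM_UNLOCK, NULL);	// and evictable again
 */
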
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1140 | static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version) |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1141 | { |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1142 | int err; |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1143 | struct ipc_namespace *ns; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1144 | struct shmid64_ds sem64; |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1145 | |
Davidlohr Bueso | 2caacaa | 2013-09-11 14:26:21 -0700 | [diff] [blame] | 1146 | if (cmd < 0 || shmid < 0) |
| 1147 | return -EINVAL; |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1148 | |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1149 | ns = current->nsproxy->ipc_ns; |
| 1150 | |
| 1151 | switch (cmd) { |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1152 | case IPC_INFO: { |
| 1153 | struct shminfo64 shminfo; |
| 1154 | err = shmctl_ipc_info(ns, &shminfo); |
| 1155 | if (err < 0) |
| 1156 | return err; |
| 1157 | if (copy_shminfo_to_user(buf, &shminfo, version)) |
| 1158 | err = -EFAULT; |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1159 | return err; |
Davidlohr Bueso | 2caacaa | 2013-09-11 14:26:21 -0700 | [diff] [blame] | 1160 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1161 | case SHM_INFO: { |
| 1162 | struct shm_info shm_info; |
| 1163 | err = shmctl_shm_info(ns, &shm_info); |
| 1164 | if (err < 0) |
| 1165 | return err; |
| 1166 | if (copy_to_user(buf, &shm_info, sizeof(shm_info))) |
| 1167 | err = -EFAULT; |
| 1168 | return err; |
| 1169 | } |
| 1170 | case SHM_STAT: |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1171 | case SHM_STAT_ANY: |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1172 | case IPC_STAT: { |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1173 | err = shmctl_stat(ns, shmid, cmd, &sem64); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1174 | if (err < 0) |
| 1175 | return err; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1176 | if (copy_shmid_to_user(buf, &sem64, version)) |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1177 | err = -EFAULT; |
| 1178 | return err; |
| 1179 | } |
| 1180 | case IPC_SET: |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1181 | if (copy_shmid_from_user(&sem64, buf, version)) |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1182 | return -EFAULT; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1183 | /* fallthru */ |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1184 | case IPC_RMID: |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1185 | return shmctl_down(ns, shmid, cmd, &sem64); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1186 | case SHM_LOCK: |
| 1187 | case SHM_UNLOCK: |
| 1188 | return shmctl_do_lock(ns, shmid, cmd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | default: |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1190 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1193 | |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1194 | SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) |
| 1195 | { |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1196 | return ksys_shmctl(shmid, cmd, buf, IPC_64); |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1197 | } |
| 1198 | |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1199 | #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION |
| 1200 | long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf) |
| 1201 | { |
| 1202 | int version = ipc_parse_version(&cmd); |
| 1203 | |
| 1204 | return ksys_shmctl(shmid, cmd, buf, version); |
| 1205 | } |
| 1206 | |
| 1207 | SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) |
| 1208 | { |
| 1209 | return ksys_old_shmctl(shmid, cmd, buf); |
| 1210 | } |
| 1211 | #endif |
| 1212 | |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1213 | #ifdef CONFIG_COMPAT |
| 1214 | |
| 1215 | struct compat_shmid_ds { |
| 1216 | struct compat_ipc_perm shm_perm; |
| 1217 | int shm_segsz; |
Arnd Bergmann | 9afc5ee | 2018-07-13 12:52:28 +0200 | [diff] [blame] | 1218 | old_time32_t shm_atime; |
| 1219 | old_time32_t shm_dtime; |
| 1220 | old_time32_t shm_ctime; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1221 | compat_ipc_pid_t shm_cpid; |
| 1222 | compat_ipc_pid_t shm_lpid; |
| 1223 | unsigned short shm_nattch; |
| 1224 | unsigned short shm_unused; |
| 1225 | compat_uptr_t shm_unused2; |
| 1226 | compat_uptr_t shm_unused3; |
| 1227 | }; |
| 1228 | |
| 1229 | struct compat_shminfo64 { |
| 1230 | compat_ulong_t shmmax; |
| 1231 | compat_ulong_t shmmin; |
| 1232 | compat_ulong_t shmmni; |
| 1233 | compat_ulong_t shmseg; |
| 1234 | compat_ulong_t shmall; |
| 1235 | compat_ulong_t __unused1; |
| 1236 | compat_ulong_t __unused2; |
| 1237 | compat_ulong_t __unused3; |
| 1238 | compat_ulong_t __unused4; |
| 1239 | }; |
| 1240 | |
| 1241 | struct compat_shm_info { |
| 1242 | compat_int_t used_ids; |
| 1243 | compat_ulong_t shm_tot, shm_rss, shm_swp; |
| 1244 | compat_ulong_t swap_attempts, swap_successes; |
| 1245 | }; |
| 1246 | |
| 1247 | static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in, |
| 1248 | int version) |
| 1249 | { |
| 1250 | if (in->shmmax > INT_MAX) |
| 1251 | in->shmmax = INT_MAX; |
| 1252 | if (version == IPC_64) { |
| 1253 | struct compat_shminfo64 info; |
| 1254 | memset(&info, 0, sizeof(info)); |
| 1255 | info.shmmax = in->shmmax; |
| 1256 | info.shmmin = in->shmmin; |
| 1257 | info.shmmni = in->shmmni; |
| 1258 | info.shmseg = in->shmseg; |
| 1259 | info.shmall = in->shmall; |
| 1260 | return copy_to_user(buf, &info, sizeof(info)); |
| 1261 | } else { |
| 1262 | struct shminfo info; |
| 1263 | memset(&info, 0, sizeof(info)); |
| 1264 | info.shmmax = in->shmmax; |
| 1265 | info.shmmin = in->shmmin; |
| 1266 | info.shmmni = in->shmmni; |
| 1267 | info.shmseg = in->shmseg; |
| 1268 | info.shmall = in->shmall; |
| 1269 | return copy_to_user(buf, &info, sizeof(info)); |
| 1270 | } |
| 1271 | } |
| 1272 | |
| 1273 | static int put_compat_shm_info(struct shm_info *ip, |
| 1274 | struct compat_shm_info __user *uip) |
| 1275 | { |
| 1276 | struct compat_shm_info info; |
| 1277 | |
| 1278 | memset(&info, 0, sizeof(info)); |
| 1279 | info.used_ids = ip->used_ids; |
| 1280 | info.shm_tot = ip->shm_tot; |
| 1281 | info.shm_rss = ip->shm_rss; |
| 1282 | info.shm_swp = ip->shm_swp; |
| 1283 | info.swap_attempts = ip->swap_attempts; |
| 1284 | info.swap_successes = ip->swap_successes; |
Al Viro | b776e4b | 2017-09-25 20:38:45 -0400 | [diff] [blame] | 1285 | return copy_to_user(uip, &info, sizeof(info)); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1286 | } |
| 1287 | |
| 1288 | static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, |
| 1289 | int version) |
| 1290 | { |
| 1291 | if (version == IPC_64) { |
| 1292 | struct compat_shmid64_ds v; |
| 1293 | memset(&v, 0, sizeof(v)); |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1294 | to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm); |
Arnd Bergmann | c2ab975 | 2015-04-28 21:39:50 +0200 | [diff] [blame] | 1295 | v.shm_atime = lower_32_bits(in->shm_atime); |
| 1296 | v.shm_atime_high = upper_32_bits(in->shm_atime); |
| 1297 | v.shm_dtime = lower_32_bits(in->shm_dtime); |
| 1298 | v.shm_dtime_high = upper_32_bits(in->shm_dtime); |
| 1299 | v.shm_ctime = lower_32_bits(in->shm_ctime); |
| 1300 | v.shm_ctime_high = upper_32_bits(in->shm_ctime); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1301 | v.shm_segsz = in->shm_segsz; |
| 1302 | v.shm_nattch = in->shm_nattch; |
| 1303 | v.shm_cpid = in->shm_cpid; |
| 1304 | v.shm_lpid = in->shm_lpid; |
| 1305 | return copy_to_user(buf, &v, sizeof(v)); |
| 1306 | } else { |
| 1307 | struct compat_shmid_ds v; |
| 1308 | memset(&v, 0, sizeof(v)); |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1309 | to_compat_ipc_perm(&v.shm_perm, &in->shm_perm); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1310 | v.shm_perm.key = in->shm_perm.key; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1311 | v.shm_atime = in->shm_atime; |
| 1312 | v.shm_dtime = in->shm_dtime; |
| 1313 | v.shm_ctime = in->shm_ctime; |
| 1314 | v.shm_segsz = in->shm_segsz; |
| 1315 | v.shm_nattch = in->shm_nattch; |
| 1316 | v.shm_cpid = in->shm_cpid; |
| 1317 | v.shm_lpid = in->shm_lpid; |
| 1318 | return copy_to_user(buf, &v, sizeof(v)); |
| 1319 | } |
| 1320 | } |
| 1321 | |
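/*
 * The lower_32_bits()/upper_32_bits() split above mirrors the
 * !CONFIG_64BIT handling in shmctl_stat(): 64-bit timestamps travel to
 * 32-bit userspace as shm_*time (low word) plus shm_*time_high (high
 * word). A y2038-aware consumer would reassemble them roughly like this
 * (sketch, using the compat_shmid64_ds field names):
 *
 *	unsigned long long atime =
 *		((unsigned long long)v.shm_atime_high << 32) | v.shm_atime;
 */
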
| 1322 | static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf, |
| 1323 | int version) |
| 1324 | { |
| 1325 | memset(out, 0, sizeof(*out)); |
| 1326 | if (version == IPC_64) { |
Linus Torvalds | 6aa211e | 2017-09-25 18:37:28 -0700 | [diff] [blame] | 1327 | struct compat_shmid64_ds __user *p = buf; |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1328 | return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1329 | } else { |
Linus Torvalds | 6aa211e | 2017-09-25 18:37:28 -0700 | [diff] [blame] | 1330 | struct compat_shmid_ds __user *p = buf; |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1331 | return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1332 | } |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1333 | } |
| 1334 | |
Jason Yan | 1cd377b | 2020-04-06 20:12:56 -0700 | [diff] [blame] | 1335 | static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version) |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1336 | { |
| 1337 | struct ipc_namespace *ns; |
| 1338 | struct shmid64_ds sem64; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1339 | int err; |
| 1340 | |
| 1341 | ns = current->nsproxy->ipc_ns; |
| 1342 | |
| 1343 | if (cmd < 0 || shmid < 0) |
| 1344 | return -EINVAL; |
| 1345 | |
| 1346 | switch (cmd) { |
| 1347 | case IPC_INFO: { |
| 1348 | struct shminfo64 shminfo; |
| 1349 | err = shmctl_ipc_info(ns, &shminfo); |
| 1350 | if (err < 0) |
| 1351 | return err; |
| 1352 | if (copy_compat_shminfo_to_user(uptr, &shminfo, version)) |
| 1353 | err = -EFAULT; |
| 1354 | return err; |
| 1355 | } |
| 1356 | case SHM_INFO: { |
| 1357 | struct shm_info shm_info; |
| 1358 | err = shmctl_shm_info(ns, &shm_info); |
| 1359 | if (err < 0) |
| 1360 | return err; |
| 1361 | if (put_compat_shm_info(&shm_info, uptr)) |
| 1362 | err = -EFAULT; |
| 1363 | return err; |
| 1364 | } |
| 1365 | case IPC_STAT: |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1366 | case SHM_STAT_ANY: |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1367 | case SHM_STAT: |
| 1368 | err = shmctl_stat(ns, shmid, cmd, &sem64); |
| 1369 | if (err < 0) |
| 1370 | return err; |
Will Deacon | 58aff0a | 2017-09-18 17:47:38 +0100 | [diff] [blame] | 1371 | if (copy_compat_shmid_to_user(uptr, &sem64, version)) |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1372 | err = -EFAULT; |
| 1373 | return err; |
| 1374 | |
| 1375 | case IPC_SET: |
| 1376 | if (copy_compat_shmid_from_user(&sem64, uptr, version)) |
| 1377 | return -EFAULT; |
| 1378 | /* fallthru */ |
| 1379 | case IPC_RMID: |
| 1380 | return shmctl_down(ns, shmid, cmd, &sem64); |
| 1381 | case SHM_LOCK: |
| 1382 | case SHM_UNLOCK: |
| 1383 | return shmctl_do_lock(ns, shmid, cmd); |
| 1385 | default: |
| 1386 | return -EINVAL; |
| 1387 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | return err; |
| 1389 | } |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1390 | |
| 1391 | COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr) |
| 1392 | { |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1393 | return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64); |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1394 | } |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1395 | |
| 1396 | #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
| 1397 | long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr) |
| 1398 | { |
| 1399 | int version = compat_ipc_parse_version(&cmd); |
| 1400 | |
| 1401 | return compat_ksys_shmctl(shmid, cmd, uptr, version); |
| 1402 | } |
| 1403 | |
| 1404 | COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr) |
| 1405 | { |
| 1406 | return compat_ksys_old_shmctl(shmid, cmd, uptr); |
| 1407 | } |
| 1408 | #endif |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1409 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1410 | |
| 1411 | /* |
| 1412 | * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. |
| 1413 | * |
| 1414 | * NOTE! Despite the name, this is NOT a direct system call entrypoint. The |
| 1415 |  * "raddr" pointer points to kernel space, and there has to be a wrapper around
| 1416 | * this. |
| 1417 | */ |
Davidlohr Bueso | 95e91b8 | 2017-02-27 14:28:24 -0800 | [diff] [blame] | 1418 | long do_shmat(int shmid, char __user *shmaddr, int shmflg, |
| 1419 | ulong *raddr, unsigned long shmlba) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | { |
| 1421 | struct shmid_kernel *shp; |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1422 | unsigned long addr = (unsigned long)shmaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | unsigned long size; |
Al Viro | 4f089ac | 2018-06-17 12:24:00 -0400 | [diff] [blame] | 1424 | struct file *file, *base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 | int err; |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1426 | unsigned long flags = MAP_SHARED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | unsigned long prot; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | int acc_mode; |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 1429 | struct ipc_namespace *ns; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1430 | struct shm_file_data *sfd; |
Al Viro | c9c554f | 2018-07-11 14:19:04 -0400 | [diff] [blame] | 1431 | int f_flags; |
Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1432 | unsigned long populate = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1434 | err = -EINVAL; |
| 1435 | if (shmid < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | goto out; |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1437 | |
| 1438 | if (addr) { |
Will Deacon | 079a96a | 2012-07-30 14:42:38 -0700 | [diff] [blame] | 1439 | if (addr & (shmlba - 1)) { |
Davidlohr Bueso | 8f89c00 | 2018-05-25 14:47:30 -0700 | [diff] [blame] | 1440 | if (shmflg & SHM_RND) { |
Davidlohr Bueso | a73ab24 | 2018-05-25 14:47:27 -0700 | [diff] [blame] | 1441 | addr &= ~(shmlba - 1); /* round down */ |
Davidlohr Bueso | 8f89c00 | 2018-05-25 14:47:30 -0700 | [diff] [blame] | 1442 | |
| 1443 | /* |
| 1444 |  * Ensure that the rounded-down address is
| 1445 |  * non-zero when remapping, which can happen
| 1446 |  * when addr < shmlba.
| 1447 | */ |
| 1448 | if (!addr && (shmflg & SHM_REMAP)) |
| 1449 | goto out; |
| 1450 | } else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | #ifndef __ARCH_FORCE_SHMLBA |
| 1452 | if (addr & ~PAGE_MASK) |
| 1453 | #endif |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1454 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1455 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1457 | flags |= MAP_FIXED; |
| 1458 | } else if ((shmflg & SHM_REMAP)) |
| 1459 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | |
| 1461 | if (shmflg & SHM_RDONLY) { |
| 1462 | prot = PROT_READ; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1463 | acc_mode = S_IRUGO; |
Al Viro | c9c554f | 2018-07-11 14:19:04 -0400 | [diff] [blame] | 1464 | f_flags = O_RDONLY; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1465 | } else { |
| 1466 | prot = PROT_READ | PROT_WRITE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | acc_mode = S_IRUGO | S_IWUGO; |
Al Viro | c9c554f | 2018-07-11 14:19:04 -0400 | [diff] [blame] | 1468 | f_flags = O_RDWR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1469 | } |
| 1470 | if (shmflg & SHM_EXEC) { |
| 1471 | prot |= PROT_EXEC; |
| 1472 | acc_mode |= S_IXUGO; |
| 1473 | } |
| 1474 | |
| 1475 | /* |
| 1476 | * We cannot rely on the fs check since SYSV IPC does have an |
| 1477 | * additional creator id... |
| 1478 | */ |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 1479 | ns = current->nsproxy->ipc_ns; |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1480 | rcu_read_lock(); |
| 1481 | shp = shm_obtain_object_check(ns, shmid); |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 1482 | if (IS_ERR(shp)) { |
| 1483 | err = PTR_ERR(shp); |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1484 | goto out_unlock; |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 1485 | } |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1486 | |
| 1487 | err = -EACCES; |
Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 1488 | if (ipcperms(ns, &shp->shm_perm, acc_mode)) |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1489 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1490 | |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1491 | err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg); |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1492 | if (err) |
| 1493 | goto out_unlock; |
| 1494 | |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1495 | ipc_lock_object(&shp->shm_perm); |
Greg Thelen | a399b29 | 2013-11-21 14:32:00 -0800 | [diff] [blame] | 1496 | |
| 1497 | /* check if shm_destroy() is tearing down shp */ |
Rafael Aquini | 0f3d2b0 | 2014-01-27 17:07:01 -0800 | [diff] [blame] | 1498 | if (!ipc_valid_object(&shp->shm_perm)) { |
Greg Thelen | a399b29 | 2013-11-21 14:32:00 -0800 | [diff] [blame] | 1499 | ipc_unlock_object(&shp->shm_perm); |
| 1500 | err = -EIDRM; |
| 1501 | goto out_unlock; |
| 1502 | } |
| 1503 | |
Eric Biggers | 3f05317 | 2018-04-13 15:35:30 -0700 | [diff] [blame] | 1504 | /* |
| 1505 | * We need to take a reference to the real shm file to prevent the |
| 1506 | * pointer from becoming stale in cases where the lifetime of the outer |
| 1507 | * file extends beyond that of the shm segment. It's not usually |
| 1508 | * possible, but it can happen during remap_file_pages() emulation as |
| 1509 | * that unmaps the memory, then does ->mmap() via file reference only. |
| 1510 | * We'll deny the ->mmap() if the shm segment was since removed, but to |
| 1511 | * detect shm ID reuse we need to compare the file pointers. |
| 1512 | */ |
Al Viro | 4f089ac | 2018-06-17 12:24:00 -0400 | [diff] [blame] | 1513 | base = get_file(shp->shm_file); |
| 1514 | shp->shm_nattch++; |
| 1515 | size = i_size_read(file_inode(base)); |
| 1516 | ipc_unlock_object(&shp->shm_perm); |
| 1517 | rcu_read_unlock(); |
| 1518 | |
| 1519 | err = -ENOMEM; |
| 1520 | sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); |
| 1521 | if (!sfd) { |
| 1522 | fput(base); |
| 1523 | goto out_nattch; |
| 1524 | } |
| 1525 | |
| 1526 | file = alloc_file_clone(base, f_flags, |
| 1527 | is_file_hugepages(base) ? |
| 1528 | &shm_file_operations_huge : |
| 1529 | &shm_file_operations); |
| 1530 | err = PTR_ERR(file); |
| 1531 | if (IS_ERR(file)) { |
| 1532 | kfree(sfd); |
| 1533 | fput(base); |
| 1534 | goto out_nattch; |
| 1535 | } |
| 1536 | |
| 1537 | sfd->id = shp->shm_perm.id; |
| 1538 | sfd->ns = get_ipc_ns(ns); |
| 1539 | sfd->file = base; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1540 | sfd->vm_ops = NULL; |
Al Viro | 4f089ac | 2018-06-17 12:24:00 -0400 | [diff] [blame] | 1541 | file->private_data = sfd; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1542 | |
Al Viro | 8b3ec68 | 2012-05-30 17:11:23 -0400 | [diff] [blame] | 1543 | err = security_mmap_file(file, prot, flags); |
| 1544 | if (err) |
| 1545 | goto out_fput; |
| 1546 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1547 | if (mmap_write_lock_killable(current->mm)) { |
Michal Hocko | 91f4f94 | 2016-05-23 16:25:51 -0700 | [diff] [blame] | 1548 | err = -EINTR; |
| 1549 | goto out_fput; |
| 1550 | } |
| 1551 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | if (addr && !(shmflg & SHM_REMAP)) { |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1553 | err = -EINVAL; |
Manfred Spraul | 247a8ce | 2014-06-06 14:37:38 -0700 | [diff] [blame] | 1554 | if (addr + size < addr) |
| 1555 | goto invalid; |
| 1556 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | if (find_vma_intersection(current->mm, addr, addr + size)) |
| 1558 | goto invalid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1559 | } |
Davidlohr Bueso | f42569b | 2013-09-11 14:26:22 -0700 | [diff] [blame] | 1560 | |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1561 | addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL); |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1562 | *raddr = addr; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1563 | err = 0; |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1564 | if (IS_ERR_VALUE(addr)) |
| 1565 | err = (long)addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1566 | invalid: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1567 | mmap_write_unlock(current->mm); |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1568 | if (populate) |
Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1569 | mm_populate(addr, populate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1570 | |
Al Viro | 8b3ec68 | 2012-05-30 17:11:23 -0400 | [diff] [blame] | 1571 | out_fput: |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1572 | fput(file); |
| 1573 | |
| 1574 | out_nattch: |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1575 | down_write(&shm_ids(ns).rwsem); |
Nadia Derbey | 00c2bf8 | 2008-07-25 01:48:03 -0700 | [diff] [blame] | 1576 | shp = shm_lock(ns, shmid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 | shp->shm_nattch--; |
Vasiliy Kulikov | b34a6b1 | 2011-07-26 16:08:48 -0700 | [diff] [blame] | 1578 | if (shm_may_destroy(ns, shp)) |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 1579 | shm_destroy(ns, shp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | else |
| 1581 | shm_unlock(shp); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1582 | up_write(&shm_ids(ns).rwsem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1583 | return err; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1584 | |
| 1585 | out_unlock: |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1586 | rcu_read_unlock(); |
Davidlohr Bueso | f42569b | 2013-09-11 14:26:22 -0700 | [diff] [blame] | 1587 | out: |
| 1588 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | } |
| 1590 | |
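/*
 * Userspace sketch of the attach paths handled by do_shmat() above
 * (illustrative; "id" and "hint" are assumptions, error handling omitted).
 * With a NULL address the kernel chooses the mapping; a non-NULL address
 * must be SHMLBA-aligned unless SHM_RND is passed, in which case it is
 * rounded down as in the code above.
 *
 *	void *p  = shmat(id, NULL, 0);		// kernel-chosen address, read/write
 *	void *ro = shmat(id, NULL, SHM_RDONLY);	// mapped PROT_READ only
 *	void *at = shmat(id, hint, SHM_RND);	// hint rounded down to SHMLBA
 *
 *	if (p == (void *)-1)
 *		perror("shmat");		// errno carries the do_shmat() error
 */
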
Heiko Carstens | d5460c9 | 2009-01-14 14:14:27 +0100 | [diff] [blame] | 1591 | SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) |
Stephen Rothwell | 7d87e14c | 2005-05-01 08:59:12 -0700 | [diff] [blame] | 1592 | { |
| 1593 | unsigned long ret; |
| 1594 | long err; |
| 1595 | |
Will Deacon | 079a96a | 2012-07-30 14:42:38 -0700 | [diff] [blame] | 1596 | err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); |
Stephen Rothwell | 7d87e14c | 2005-05-01 08:59:12 -0700 | [diff] [blame] | 1597 | if (err) |
| 1598 | return err; |
| 1599 | force_successful_syscall_return(); |
| 1600 | return (long)ret; |
| 1601 | } |
| 1602 | |
Al Viro | a78ee9e | 2017-07-09 10:38:28 -0400 | [diff] [blame] | 1603 | #ifdef CONFIG_COMPAT |
| 1604 | |
| 1605 | #ifndef COMPAT_SHMLBA |
| 1606 | #define COMPAT_SHMLBA SHMLBA |
| 1607 | #endif |
| 1608 | |
| 1609 | COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg) |
| 1610 | { |
| 1611 | unsigned long ret; |
| 1612 | long err; |
| 1613 | |
| 1614 | err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA); |
| 1615 | if (err) |
| 1616 | return err; |
| 1617 | force_successful_syscall_return(); |
| 1618 | return (long)ret; |
| 1619 | } |
| 1620 | #endif |
| 1621 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | /* |
| 1623 | * detach and kill segment if marked destroyed. |
| 1624 | * The work is done in shm_close. |
| 1625 | */ |
Dominik Brodowski | da1e2744 | 2018-03-20 20:09:48 +0100 | [diff] [blame] | 1626 | long ksys_shmdt(char __user *shmaddr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | { |
| 1628 | struct mm_struct *mm = current->mm; |
Mike Frysinger | 586c7e6 | 2009-06-09 16:26:23 -0700 | [diff] [blame] | 1629 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | unsigned long addr = (unsigned long)shmaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | int retval = -EINVAL; |
Mike Frysinger | 586c7e6 | 2009-06-09 16:26:23 -0700 | [diff] [blame] | 1632 | #ifdef CONFIG_MMU |
| 1633 | loff_t size = 0; |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1634 | struct file *file; |
Mike Frysinger | 586c7e6 | 2009-06-09 16:26:23 -0700 | [diff] [blame] | 1635 | struct vm_area_struct *next; |
| 1636 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | |
Hugh Dickins | df1e2fb | 2006-03-24 03:18:06 -0800 | [diff] [blame] | 1638 | if (addr & ~PAGE_MASK) |
| 1639 | return retval; |
| 1640 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1641 | if (mmap_write_lock_killable(mm)) |
Michal Hocko | 91f4f94 | 2016-05-23 16:25:51 -0700 | [diff] [blame] | 1642 | return -EINTR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1643 | |
| 1644 | /* |
| 1645 | * This function tries to be smart and unmap shm segments that |
| 1646 | * were modified by partial mlock or munmap calls: |
| 1647 | * - It first determines the size of the shm segment that should be |
| 1648 | * unmapped: It searches for a vma that is backed by shm and that |
| 1649 |  * started at address shmaddr. It records its size and then unmaps
| 1650 | * it. |
| 1651 | * - Then it unmaps all shm vmas that started at shmaddr and that |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1652 | * are within the initially determined size and that are from the |
| 1653 | * same shm segment from which we determined the size. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1654 | * Errors from do_munmap are ignored: the function only fails if |
| 1655 | * it's called with invalid parameters or if it's called to unmap |
| 1656 | * a part of a vma. Both calls in this function are for full vmas, |
| 1657 | * the parameters are directly copied from the vma itself and always |
| 1658 | * valid - therefore do_munmap cannot fail. (famous last words?) |
| 1659 | */ |
| 1660 | /* |
| 1661 | * If it had been mremap()'d, the starting address would not |
| 1662 | * match the usual checks anyway. So assume all vma's are |
| 1663 | * above the starting address given. |
| 1664 | */ |
| 1665 | vma = find_vma(mm, addr); |
| 1666 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1667 | #ifdef CONFIG_MMU |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1668 | while (vma) { |
| 1669 | next = vma->vm_next; |
| 1670 | |
| 1671 | /* |
| 1672 | * Check if the starting address would match, i.e. it's |
| 1673 |  * a fragment created by mprotect() and/or munmap(), or
| 1674 |  * otherwise it starts at this address with no hassles.
| 1675 | */ |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1676 | if ((vma->vm_ops == &shm_vm_ops) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { |
| 1678 | |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1679 | /* |
| 1680 | * Record the file of the shm segment being |
| 1681 | * unmapped. With mremap(), someone could place |
| 1682 |  * a page from another segment but with equal offsets
| 1683 | * in the range we are unmapping. |
| 1684 | */ |
| 1685 | file = vma->vm_file; |
Dave Hansen | 07a46ed | 2014-12-12 16:58:22 -0800 | [diff] [blame] | 1686 | size = i_size_read(file_inode(vma->vm_file)); |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1687 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1688 | /* |
| 1689 | * We discovered the size of the shm segment, so |
| 1690 | * break out of here and fall through to the next |
| 1691 | * loop that uses the size information to stop |
| 1692 | * searching for matching vma's. |
| 1693 | */ |
| 1694 | retval = 0; |
| 1695 | vma = next; |
| 1696 | break; |
| 1697 | } |
| 1698 | vma = next; |
| 1699 | } |
| 1700 | |
| 1701 | /* |
| 1702 | * We need look no further than the maximum address a fragment |
| 1703 | * could possibly have landed at. Also cast things to loff_t to |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1704 | * prevent overflows and make comparisons vs. equal-width types. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | */ |
KAMEZAWA Hiroyuki | 8e36709 | 2006-02-10 01:51:12 -0800 | [diff] [blame] | 1706 | size = PAGE_ALIGN(size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1707 | while (vma && (loff_t)(vma->vm_end - addr) <= size) { |
| 1708 | next = vma->vm_next; |
| 1709 | |
| 1710 | /* finding a matching vma now does not alter retval */ |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1711 | if ((vma->vm_ops == &shm_vm_ops) && |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1712 | ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && |
| 1713 | (vma->vm_file == file)) |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1714 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1715 | vma = next; |
| 1716 | } |
| 1717 | |
Shailesh Pandey | 63980c8 | 2016-12-14 15:06:10 -0800 | [diff] [blame] | 1718 | #else /* CONFIG_MMU */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1719 | 	/*
Shailesh Pandey | 63980c8 | 2016-12-14 15:06:10 -0800 | [diff] [blame] | 1720 | 	 * Under NOMMU conditions, the exact address to be destroyed must be given.
| 1721 | 	 */
Davidlohr Bueso | 530fcd16 | 2013-09-11 14:26:28 -0700 | [diff] [blame] | 1722 | if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1723 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1724 | retval = 0; |
| 1725 | } |
| 1726 | |
| 1727 | #endif |
| 1728 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1729 | mmap_write_unlock(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1730 | return retval; |
| 1731 | } |
| 1732 | |
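/*
 * Detach sketch matching the vma walk above (illustrative): shmdt() takes
 * the exact address returned by shmat() and unmaps every fragment of that
 * attach, even if mprotect()/munmap() split the original vma. Combined
 * with an earlier IPC_RMID, the final detach tears the segment down via
 * shm_close()/shm_destroy().
 *
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);	// ends up in ksys_shmdt()
 */
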
Dominik Brodowski | da1e2744 | 2018-03-20 20:09:48 +0100 | [diff] [blame] | 1733 | SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) |
| 1734 | { |
| 1735 | return ksys_shmdt(shmaddr); |
| 1736 | } |
| 1737 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | #ifdef CONFIG_PROC_FS |
Mike Waychison | 19b4946 | 2005-09-06 15:17:10 -0700 | [diff] [blame] | 1739 | static int sysvipc_shm_proc_show(struct seq_file *s, void *it) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | { |
Eric W. Biederman | 98f929b | 2018-03-23 00:29:57 -0500 | [diff] [blame] | 1741 | struct pid_namespace *pid_ns = ipc_seq_pid_ns(s); |
Eric W. Biederman | 1efdb69 | 2012-02-07 16:54:11 -0800 | [diff] [blame] | 1742 | struct user_namespace *user_ns = seq_user_ns(s); |
Kees Cook | ade9f91 | 2017-08-02 13:32:21 -0700 | [diff] [blame] | 1743 | struct kern_ipc_perm *ipcp = it; |
| 1744 | struct shmid_kernel *shp; |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 1745 | unsigned long rss = 0, swp = 0; |
| 1746 | |
Kees Cook | ade9f91 | 2017-08-02 13:32:21 -0700 | [diff] [blame] | 1747 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 1748 | shm_add_rss_swap(shp, &rss, &swp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1749 | |
Paul Menage | 6c82681 | 2008-06-12 15:21:49 -0700 | [diff] [blame] | 1750 | #if BITS_PER_LONG <= 32 |
| 1751 | #define SIZE_SPEC "%10lu" |
| 1752 | #else |
| 1753 | #define SIZE_SPEC "%21lu" |
| 1754 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | |
Joe Perches | 7f032d6 | 2015-04-15 16:17:54 -0700 | [diff] [blame] | 1756 | seq_printf(s, |
| 1757 | "%10d %10d %4o " SIZE_SPEC " %5u %5u " |
Deepa Dinamani | 7ff2819 | 2017-08-02 19:51:14 -0700 | [diff] [blame] | 1758 | "%5lu %5u %5u %5u %5u %10llu %10llu %10llu " |
Joe Perches | 7f032d6 | 2015-04-15 16:17:54 -0700 | [diff] [blame] | 1759 | SIZE_SPEC " " SIZE_SPEC "\n", |
| 1760 | shp->shm_perm.key, |
| 1761 | shp->shm_perm.id, |
| 1762 | shp->shm_perm.mode, |
| 1763 | shp->shm_segsz, |
Eric W. Biederman | 98f929b | 2018-03-23 00:29:57 -0500 | [diff] [blame] | 1764 | pid_nr_ns(shp->shm_cprid, pid_ns), |
| 1765 | pid_nr_ns(shp->shm_lprid, pid_ns), |
Joe Perches | 7f032d6 | 2015-04-15 16:17:54 -0700 | [diff] [blame] | 1766 | shp->shm_nattch, |
| 1767 | from_kuid_munged(user_ns, shp->shm_perm.uid), |
| 1768 | from_kgid_munged(user_ns, shp->shm_perm.gid), |
| 1769 | from_kuid_munged(user_ns, shp->shm_perm.cuid), |
| 1770 | from_kgid_munged(user_ns, shp->shm_perm.cgid), |
| 1771 | shp->shm_atim, |
| 1772 | shp->shm_dtim, |
| 1773 | shp->shm_ctim, |
| 1774 | rss * PAGE_SIZE, |
| 1775 | swp * PAGE_SIZE); |
| 1776 | |
| 1777 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1778 | } |
| 1779 | #endif |
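
/*
 * Field order of each /proc/sysvipc/shm row emitted above: key, shmid,
 * perms (octal), size, cpid, lpid, nattch, uid, gid, cuid, cgid, atime,
 * dtime, ctime, rss, swap (rss and swap are in bytes). A rough userspace
 * parse of the columns a monitoring tool usually cares about (sketch;
 * "line" holds one row read from the file):
 *
 *	int key, id;
 *	unsigned int perms;
 *	unsigned long size, rss;
 *
 *	sscanf(line, "%d %d %o %lu %*u %*u %*u %*u %*u %*u %*u"
 *		     " %*u %*u %*u %lu", &key, &id, &perms, &size, &rss);
 */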