// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/rhashtable.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm shm_perm;
	struct file *shm_file;
	unsigned long shm_nattch;
	unsigned long shm_segsz;
	time64_t shm_atim;
	time64_t shm_dtim;
	time64_t shm_ctim;
	struct pid *shm_cprid;
	struct pid *shm_lprid;
	struct user_struct *mlock_user;

	/*
	 * The task that created the shm object; used for
	 * task_lock(shp->shm_creator).
	 */
	struct task_struct *shm_creator;

	/*
	 * List by creator. task_lock(->shm_creator) required for read/write.
	 * If list_empty(), then the creator is dead already.
	 */
	struct list_head shm_clist;
	struct ipc_namespace *ns;
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000	/* segment will not be swapped */

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
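
/*
 * Illustrative note (added for exposition, not from the original source):
 * shm_file_data() reinterprets file->private_data as a struct
 * shm_file_data pointer, so the wrappers below can recover per-mapping
 * state with
 *
 *	struct shm_file_data *sfd = shm_file_data(file);
 *
 * using the pointer stashed there at attach time.
 */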

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	WARN_ON(ns != shp->ns);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp;

	rcu_read_lock();
	ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
	if (IS_ERR(ipcp))
		goto err;

	ipc_lock_object(ipcp);
	/*
	 * ipc_rmid() may have already freed the ID while ipc_lock_object()
	 * was spinning: here verify that the structure is still valid.
	 * Upon races with RMID, return -EIDRM, thus indicating that
	 * the ID points to a removed identifier.
	 */
	if (ipc_valid_object(ipcp)) {
		/* return a locked ipc object upon success */
		return container_of(ipcp, struct shmid_kernel, shm_perm);
	}

	ipc_unlock_object(ipcp);
	ipcp = ERR_PTR(-EIDRM);
err:
	rcu_read_unlock();
	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer and error out as appropriate.
	 */
	return ERR_CAST(ipcp);
}
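
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the expected calling pattern for shm_lock() is roughly
 *
 *	shp = shm_lock(ns, id);
 *	if (IS_ERR(shp))
 *		return PTR_ERR(shp);
 *	...use shp with the object lock (and RCU read lock) held...
 *	shm_unlock(shp);
 *
 * as done in __shm_open() and shm_close() below.
 */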

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(&shp->shm_perm);
	kvfree(shp);
}
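
/*
 * Note added for exposition: shm_rcu_free() is the callback handed to
 * ipc_rcu_putref()/call_rcu() elsewhere in this file, so the security
 * blob and the kvmalloc()'ed shmid_kernel are only reclaimed after an
 * RCU grace period, letting lock-free readers finish first.
 */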

/*
 * It has to be called with shp locked.
 * It must be called before ipc_rmid().
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
	struct task_struct *creator;

	/* ensure that shm_creator does not disappear */
	rcu_read_lock();

	/*
	 * A concurrent exit_shm may do a list_del_init() as well.
	 * Just do nothing if exit_shm already did the work.
	 */
	if (!list_empty(&shp->shm_clist)) {
		/*
		 * shp->shm_creator is guaranteed to be valid *only*
		 * if shp->shm_clist is not empty.
		 */
		creator = shp->shm_creator;

		task_lock(creator);
		/*
		 * list_del_init() is a nop if the entry was already removed
		 * from the list.
		 */
		list_del_init(&shp->shm_clist);
		task_unlock(creator);
	}
	rcu_read_unlock();
}

static inline void shm_rmid(struct shmid_kernel *s)
{
	shm_clist_rm(s);
	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
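
/*
 * Lock-ordering note (added for exposition): shm_rmid() runs with the
 * shp object lock held, and shm_clist_rm() nests task_lock(creator)
 * inside it, i.e. ipc object lock -> task_lock, never the reverse;
 * exit_shm() below drops task_lock() before taking any ipc locks.
 */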


static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (shp->ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments that have no users and whose
	 * originating process has already exited.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (!list_empty(&shp->shm_clist))
		return 0;

	if (shm_may_destroy(shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	for (;;) {
		struct shmid_kernel *shp;
		struct ipc_namespace *ns;

		task_lock(task);

		if (list_empty(&task->sysvshm.shm_clist)) {
			task_unlock(task);
			break;
		}

		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
				shm_clist);

		/*
		 * 1) Get a pointer to the ipc namespace. The pointer is
		 * guaranteed to be valid because the lifetime of shp is
		 * always shorter than that of the namespace shp lives in.
		 * Since we hold task_lock(), shp cannot be freed.
		 */
		ns = shp->ns;

		/*
		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
		 * which shmids are orphaned, so that a later set of the sysctl
		 * can clean them up.
		 */
		if (!ns->shm_rmid_forced)
			goto unlink_continue;

		/*
		 * 3) Get a reference to the namespace.
		 * The refcount could already be 0. If it is 0, then
		 * the shm objects will be freed by free_ipc_work().
		 */
		ns = get_ipc_ns_not_zero(ns);
		if (!ns) {
unlink_continue:
			list_del_init(&shp->shm_clist);
			task_unlock(task);
			continue;
		}

		/*
		 * 4) Get a reference to shp.
		 * This cannot fail: shm_clist_rm() is called before
		 * ipc_rmid(), thus the refcount cannot be 0.
		 */
		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

		/*
		 * 5) Unlink the shm segment from the list of segments
		 * created by current.
		 * This must be done last. After unlinking,
		 * only the refcounts obtained above prevent IPC_RMID
		 * from destroying the segment or the namespace.
		 */
		list_del_init(&shp->shm_clist);

		task_unlock(task);

		/*
		 * 6) We hold all the needed references.
		 * Lock shp and, if needed, destroy it.
		 */
		down_write(&shm_ids(ns).rwsem);
		shm_lock_by_ptr(shp);
		/*
		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, so
		 * it's safe to call ipc_rcu_putref here.
		 */
		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

		if (ipc_valid_object(&shp->shm_perm)) {
			if (shm_may_destroy(shp))
				shm_destroy(ns, shp);
			else
				shm_unlock(shp);
		} else {
			/*
			 * Someone else deleted the shp from the namespace
			 * idr/kht while we were waiting.
			 * Just unlock and continue.
			 */
			shm_unlock(shp);
		}

		up_write(&shm_ids(ns).rwsem);
		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
	}
}

static vm_fault_t shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

static int shm_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->split)
		return sfd->vm_ops->split(vma, addr);

	return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->pagesize)
		return sfd->vm_ops->pagesize(vma);

	return PAGE_SIZE;
}
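
/*
 * Note added for exposition: shm_fault(), shm_split() and shm_pagesize()
 * all follow the same delegation pattern: fetch the shm_file_data from
 * vma->vm_file and forward to the vm_ops of the underlying shmem or
 * hugetlbfs file, which were captured in shm_mmap().
 */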

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
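
/*
 * Illustrative summary (added for exposition, not from the original
 * source): shm_mmap() first bumps nattch via __shm_open(), then lets the
 * backing file map itself with call_mmap(), and finally saves the
 * backing vm_ops before substituting shm_vm_ops, so later faults can be
 * forwarded as in
 *
 *	sfd->vm_ops->fault(vmf);
 */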

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap = shm_mmap,
	.fsync = shm_fsync,
	.release = shm_release,
	.get_unmapped_area = shm_get_unmapped_area,
	.llseek = noop_llseek,
	.fallocate = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open = shm_open,	/* callback for a new vm-area open */
	.close = shm_close,	/* callback for when the vm-area is released */
	.fault = shm_fault,
	.split = shm_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(&shp->shm_perm);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = get_pid(task_tgid(current));
	shp->shm_lprid = NULL;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	shp->ns = ns;

	task_lock(current);
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
	task_unlock(current);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
	return error;
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}
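
/*
 * Note added for exposition: the two error labels above unwind in
 * reverse order of construction: no_file frees only the half-built
 * shmid_kernel via call_rcu(), while no_id additionally drops the pid
 * references, the hugetlb mlock accounting and the backing file.
 */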

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = security_shm_associate,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	return ksys_shmget(key, size, shmflg);
}
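
/*
 * Illustrative userspace view (a sketch added for exposition, not part
 * of this file): the syscall above backs the libc wrapper, e.g.
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 * which reaches newseg() via ipcget() when no segment exists for the key.
 */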

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid = tbuf_old.shm_perm.uid;
		out->shm_perm.gid = tbuf_old.shm_perm.gid;
		out->shm_perm.mode = tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader.
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
			unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
| 944 | *swp_add += info->swapped; |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 945 | spin_unlock_irq(&info->lock); |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 946 | #else |
| 947 | *rss_add += inode->i_mapping->nrpages; |
| 948 | #endif |
| 949 | } |
| 950 | } |
| 951 | |
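/*
 * Worked example of the accounting above (illustration): a hugetlb
 * segment with three resident 2 MiB pages has mapping->nrpages == 3 and
 * pages_per_huge_page() == 512, so *rss_add grows by 1536 base pages;
 * hugetlb memory is never swapped, so *swp_add is left untouched.
 */
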
| 952 | /* |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 953 | * Called with shm_ids.rwsem held as a reader |
Nadia Derbey | f4566f0 | 2007-10-18 23:40:53 -0700 | [diff] [blame] | 954 | */ |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 955 | static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, |
| 956 | unsigned long *swp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | { |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 958 | int next_id; |
| 959 | int total, in_use; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | |
| 961 | *rss = 0; |
| 962 | *swp = 0; |
| 963 | |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 964 | in_use = shm_ids(ns).in_use; |
| 965 | |
| 966 | for (total = 0, next_id = 0; total < in_use; next_id++) { |
Tony Battersby | e562aeb | 2009-04-02 16:58:26 -0700 | [diff] [blame] | 967 | struct kern_ipc_perm *ipc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 | struct shmid_kernel *shp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | |
Tony Battersby | e562aeb | 2009-04-02 16:58:26 -0700 | [diff] [blame] | 970 | ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); |
| 971 | if (ipc == NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 972 | continue; |
Tony Battersby | e562aeb | 2009-04-02 16:58:26 -0700 | [diff] [blame] | 973 | shp = container_of(ipc, struct shmid_kernel, shm_perm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 975 | shm_add_rss_swap(shp, rss, swp); |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 976 | |
| 977 | total++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | } |
| 979 | } |
| 980 | |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 981 | /* |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 982 | * This function handles some shmctl commands which require the rwsem |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 983 | * to be held in write mode. |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 984 | * NOTE: no locks must be held, the rwsem is taken inside this function. |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 985 | */ |
| 986 | static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 987 | struct shmid64_ds *shmid64) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 | { |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 989 | struct kern_ipc_perm *ipcp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 990 | struct shmid_kernel *shp; |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 991 | int err; |
| 992 | |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 993 | down_write(&shm_ids(ns).rwsem); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 994 | rcu_read_lock(); |
| 995 | |
Manfred Spraul | 4241c1a | 2018-08-21 22:01:34 -0700 | [diff] [blame] | 996 | ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd, |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 997 | &shmid64->shm_perm, 0); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 998 | if (IS_ERR(ipcp)) { |
| 999 | err = PTR_ERR(ipcp); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 1000 | goto out_unlock1; |
| 1001 | } |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1002 | |
Pierre Peiffer | a5f75e7 | 2008-04-29 01:00:54 -0700 | [diff] [blame] | 1003 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1004 | |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1005 | err = security_shm_shmctl(&shp->shm_perm, cmd); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1006 | if (err) |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 1007 | goto out_unlock1; |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 1008 | |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1009 | switch (cmd) { |
| 1010 | case IPC_RMID: |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 1011 | ipc_lock_object(&shp->shm_perm); |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 1012 | /* do_shm_rmid unlocks the ipc object and rcu */ |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1013 | do_shm_rmid(ns, ipcp); |
| 1014 | goto out_up; |
| 1015 | case IPC_SET: |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 1016 | ipc_lock_object(&shp->shm_perm); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1017 | err = ipc_update_perm(&shmid64->shm_perm, ipcp); |
Eric W. Biederman | 1efdb69 | 2012-02-07 16:54:11 -0800 | [diff] [blame] | 1018 | if (err) |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 1019 | goto out_unlock0; |
Deepa Dinamani | 7ff2819 | 2017-08-02 19:51:14 -0700 | [diff] [blame] | 1020 | shp->shm_ctim = ktime_get_real_seconds(); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1021 | break; |
| 1022 | default: |
| 1023 | err = -EINVAL; |
Davidlohr Bueso | 79ccf0f | 2013-09-11 14:26:16 -0700 | [diff] [blame] | 1024 | goto out_unlock1; |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1025 | } |
Davidlohr Bueso | 7b4cc5d | 2013-07-08 16:01:12 -0700 | [diff] [blame] | 1026 | |
| 1027 | out_unlock0: |
| 1028 | ipc_unlock_object(&shp->shm_perm); |
| 1029 | out_unlock1: |
| 1030 | rcu_read_unlock(); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1031 | out_up: |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1032 | up_write(&shm_ids(ns).rwsem); |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1033 | return err; |
| 1034 | } |
| 1035 | |
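/*
 * Illustration, not part of this file: how the IPC_SET and IPC_RMID
 * branches above are typically driven from user space. Of the copied-in
 * shmid64_ds, only shm_perm.uid, shm_perm.gid and shm_perm.mode are
 * honoured by ipc_update_perm(). Sketch only; the id is assumed valid
 * and error handling is abbreviated.
 */
#if 0	/* illustration only */
#include <sys/ipc.h>
#include <sys/shm.h>

static void example_set_then_remove(int shmid)
{
	struct shmid_ds ds;

	shmctl(shmid, IPC_STAT, &ds);	/* start from the current perms */
	ds.shm_perm.mode = 0640;	/* applied under ipc_lock_object() */
	shmctl(shmid, IPC_SET, &ds);	/* also refreshes shm_ctime */
	shmctl(shmid, IPC_RMID, NULL);	/* destroyed once nattch drops to 0 */
}
#endif
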
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1036 | static int shmctl_ipc_info(struct ipc_namespace *ns, |
| 1037 | struct shminfo64 *shminfo) |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1038 | { |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1039 | int err = security_shm_shmctl(NULL, IPC_INFO); |
| 1040 | if (!err) { |
| 1041 | memset(shminfo, 0, sizeof(*shminfo)); |
| 1042 | shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni; |
| 1043 | shminfo->shmmax = ns->shm_ctlmax; |
| 1044 | shminfo->shmall = ns->shm_ctlall; |
| 1045 | shminfo->shmmin = SHMMIN; |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1046 | down_read(&shm_ids(ns).rwsem); |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 1047 | err = ipc_get_maxidx(&shm_ids(ns)); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1048 | up_read(&shm_ids(ns).rwsem); |
Manfred Spraul | 239521f | 2014-01-27 17:07:04 -0800 | [diff] [blame] | 1049 | if (err < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | err = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1052 | return err; |
| 1053 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1054 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1055 | static int shmctl_shm_info(struct ipc_namespace *ns, |
| 1056 | struct shm_info *shm_info) |
| 1057 | { |
| 1058 | int err = security_shm_shmctl(NULL, SHM_INFO); |
| 1059 | if (!err) { |
| 1060 | memset(shm_info, 0, sizeof(*shm_info)); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1061 | down_read(&shm_ids(ns).rwsem); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1062 | shm_info->used_ids = shm_ids(ns).in_use; |
| 1063 | shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp); |
| 1064 | shm_info->shm_tot = ns->shm_tot; |
| 1065 | shm_info->swap_attempts = 0; |
| 1066 | shm_info->swap_successes = 0; |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 1067 | err = ipc_get_maxidx(&shm_ids(ns)); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1068 | up_read(&shm_ids(ns).rwsem); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1069 | if (err < 0) |
| 1070 | err = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1072 | return err; |
| 1073 | } |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 1074 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1075 | static int shmctl_stat(struct ipc_namespace *ns, int shmid, |
| 1076 | int cmd, struct shmid64_ds *tbuf) |
| 1077 | { |
| 1078 | struct shmid_kernel *shp; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1079 | int err; |
Davidlohr Bueso | c97cb9c | 2013-09-11 14:26:20 -0700 | [diff] [blame] | 1080 | |
Philippe Mikoyan | 87ad4b0 | 2018-02-06 15:40:49 -0800 | [diff] [blame] | 1081 | memset(tbuf, 0, sizeof(*tbuf)); |
| 1082 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1083 | rcu_read_lock(); |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1084 | if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) { |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1085 | shp = shm_obtain_object(ns, shmid); |
| 1086 | if (IS_ERR(shp)) { |
| 1087 | err = PTR_ERR(shp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | goto out_unlock; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1089 | } |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1090 | } else { /* IPC_STAT */ |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1091 | shp = shm_obtain_object_check(ns, shmid); |
| 1092 | if (IS_ERR(shp)) { |
| 1093 | err = PTR_ERR(shp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | goto out_unlock; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1095 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1097 | |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1098 | /* |
| 1099 | * Semantically SHM_STAT_ANY ought to be identical to |
| 1100 | 	 * the functionality provided by the /proc/sysvipc/ |
| 1101 | * interface. As such, only audit these calls and |
| 1102 | * do not do traditional S_IRUGO permission checks on |
| 1103 | * the ipc object. |
| 1104 | */ |
| 1105 | if (cmd == SHM_STAT_ANY) |
| 1106 | audit_ipc_obj(&shp->shm_perm); |
| 1107 | else { |
| 1108 | err = -EACCES; |
| 1109 | if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) |
| 1110 | goto out_unlock; |
| 1111 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1112 | |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1113 | err = security_shm_shmctl(&shp->shm_perm, cmd); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1114 | if (err) |
| 1115 | goto out_unlock; |
| 1116 | |
Philippe Mikoyan | 87ad4b0 | 2018-02-06 15:40:49 -0800 | [diff] [blame] | 1117 | ipc_lock_object(&shp->shm_perm); |
| 1118 | |
| 1119 | if (!ipc_valid_object(&shp->shm_perm)) { |
| 1120 | ipc_unlock_object(&shp->shm_perm); |
| 1121 | err = -EIDRM; |
| 1122 | goto out_unlock; |
| 1123 | } |
| 1124 | |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1125 | kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm); |
| 1126 | tbuf->shm_segsz = shp->shm_segsz; |
| 1127 | tbuf->shm_atime = shp->shm_atim; |
| 1128 | tbuf->shm_dtime = shp->shm_dtim; |
| 1129 | tbuf->shm_ctime = shp->shm_ctim; |
Arnd Bergmann | c2ab975 | 2015-04-28 21:39:50 +0200 | [diff] [blame] | 1130 | #ifndef CONFIG_64BIT |
| 1131 | tbuf->shm_atime_high = shp->shm_atim >> 32; |
| 1132 | tbuf->shm_dtime_high = shp->shm_dtim >> 32; |
| 1133 | tbuf->shm_ctime_high = shp->shm_ctim >> 32; |
| 1134 | #endif |
Eric W. Biederman | 98f929b | 2018-03-23 00:29:57 -0500 | [diff] [blame] | 1135 | tbuf->shm_cpid = pid_vnr(shp->shm_cprid); |
| 1136 | tbuf->shm_lpid = pid_vnr(shp->shm_lprid); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1137 | tbuf->shm_nattch = shp->shm_nattch; |
Philippe Mikoyan | 87ad4b0 | 2018-02-06 15:40:49 -0800 | [diff] [blame] | 1138 | |
Manfred Spraul | 615c999 | 2018-08-21 22:01:21 -0700 | [diff] [blame] | 1139 | if (cmd == IPC_STAT) { |
| 1140 | /* |
| 1141 | * As defined in SUS: |
| 1142 | * Return 0 on success |
| 1143 | */ |
| 1144 | err = 0; |
| 1145 | } else { |
| 1146 | /* |
| 1147 | * SHM_STAT and SHM_STAT_ANY (both Linux specific) |
| 1148 | * Return the full id, including the sequence number |
| 1149 | */ |
| 1150 | err = shp->shm_perm.id; |
| 1151 | } |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1152 | |
Manfred Spraul | 615c999 | 2018-08-21 22:01:21 -0700 | [diff] [blame] | 1153 | ipc_unlock_object(&shp->shm_perm); |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1154 | out_unlock: |
Davidlohr Bueso | c97cb9c | 2013-09-11 14:26:20 -0700 | [diff] [blame] | 1155 | rcu_read_unlock(); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1156 | return err; |
| 1157 | } |
| 1158 | |
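/*
 * Illustration, not part of this file: the three stat flavours handled
 * by shmctl_stat(), seen from user space. IPC_STAT takes a segment id
 * and returns 0; SHM_STAT and SHM_STAT_ANY take an index into the idr
 * and return the full id including the sequence number, which is how
 * ipcs(1) enumerates segments. max_index would typically come from the
 * return value of shmctl(0, SHM_INFO, ...). Sketch only.
 */
#if 0	/* illustration only */
#include <sys/ipc.h>
#include <sys/shm.h>

static void example_stat_walk(int max_index)
{
	struct shmid_ds ds;
	int idx;

	for (idx = 0; idx <= max_index; idx++) {
		int id = shmctl(idx, SHM_STAT, &ds);	/* index, not an id */

		if (id < 0)
			continue;	/* empty slot, or no S_IRUGO access */
		shmctl(id, IPC_STAT, &ds);	/* id round-trips; returns 0 */
	}
}
#endif
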
| 1159 | static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd) |
| 1160 | { |
| 1161 | struct shmid_kernel *shp; |
| 1162 | struct file *shm_file; |
| 1163 | int err; |
| 1164 | |
| 1165 | rcu_read_lock(); |
| 1166 | shp = shm_obtain_object_check(ns, shmid); |
| 1167 | if (IS_ERR(shp)) { |
| 1168 | err = PTR_ERR(shp); |
| 1169 | goto out_unlock1; |
| 1170 | } |
| 1171 | |
| 1172 | audit_ipc_obj(&(shp->shm_perm)); |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1173 | err = security_shm_shmctl(&shp->shm_perm, cmd); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1174 | if (err) |
| 1175 | goto out_unlock1; |
| 1176 | |
| 1177 | ipc_lock_object(&shp->shm_perm); |
| 1178 | |
| 1179 | /* check if shm_destroy() is tearing down shp */ |
| 1180 | if (!ipc_valid_object(&shp->shm_perm)) { |
| 1181 | err = -EIDRM; |
| 1182 | goto out_unlock0; |
| 1183 | } |
| 1184 | |
| 1185 | if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { |
| 1186 | kuid_t euid = current_euid(); |
| 1187 | |
| 1188 | if (!uid_eq(euid, shp->shm_perm.uid) && |
| 1189 | !uid_eq(euid, shp->shm_perm.cuid)) { |
| 1190 | err = -EPERM; |
| 1191 | goto out_unlock0; |
| 1192 | } |
| 1193 | if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { |
| 1194 | err = -EPERM; |
| 1195 | goto out_unlock0; |
| 1196 | } |
| 1197 | } |
| 1198 | |
| 1199 | shm_file = shp->shm_file; |
| 1200 | if (is_file_hugepages(shm_file)) |
| 1201 | goto out_unlock0; |
| 1202 | |
| 1203 | if (cmd == SHM_LOCK) { |
| 1204 | struct user_struct *user = current_user(); |
| 1205 | |
| 1206 | err = shmem_lock(shm_file, 1, user); |
| 1207 | if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { |
| 1208 | shp->shm_perm.mode |= SHM_LOCKED; |
| 1209 | shp->mlock_user = user; |
| 1210 | } |
| 1211 | goto out_unlock0; |
| 1212 | } |
| 1213 | |
| 1214 | /* SHM_UNLOCK */ |
| 1215 | if (!(shp->shm_perm.mode & SHM_LOCKED)) |
| 1216 | goto out_unlock0; |
| 1217 | shmem_lock(shm_file, 0, shp->mlock_user); |
| 1218 | shp->shm_perm.mode &= ~SHM_LOCKED; |
| 1219 | shp->mlock_user = NULL; |
| 1220 | get_file(shm_file); |
| 1221 | ipc_unlock_object(&shp->shm_perm); |
| 1222 | rcu_read_unlock(); |
| 1223 | shmem_unlock_mapping(shm_file->f_mapping); |
| 1224 | |
| 1225 | fput(shm_file); |
| 1226 | return err; |
| 1227 | |
| 1228 | out_unlock0: |
| 1229 | ipc_unlock_object(&shp->shm_perm); |
| 1230 | out_unlock1: |
| 1231 | rcu_read_unlock(); |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1232 | return err; |
| 1233 | } |
| 1234 | |
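/*
 * Illustration, not part of this file: SHM_LOCK/SHM_UNLOCK as driven
 * from user space. Without CAP_IPC_LOCK the caller must match the
 * segment's uid or cuid and, for SHM_LOCK, have a non-zero
 * RLIMIT_MEMLOCK, mirroring the checks in shmctl_do_lock(). Sketch only.
 */
#if 0	/* illustration only */
#include <sys/ipc.h>
#include <sys/shm.h>

static void example_lock_window(int shmid)
{
	if (shmctl(shmid, SHM_LOCK, NULL) == 0) {
		/* pages pinned via shmem_lock(); segment cannot be swapped */
		shmctl(shmid, SHM_UNLOCK, NULL);	/* unpins and scans the mapping */
	}
}
#endif
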
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1235 | static long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf, int version) |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1236 | { |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1237 | int err; |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1238 | struct ipc_namespace *ns; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1239 | struct shmid64_ds sem64; |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1240 | |
Davidlohr Bueso | 2caacaa | 2013-09-11 14:26:21 -0700 | [diff] [blame] | 1241 | if (cmd < 0 || shmid < 0) |
| 1242 | return -EINVAL; |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1243 | |
Davidlohr Bueso | 68eccc1 | 2013-09-11 14:26:18 -0700 | [diff] [blame] | 1244 | ns = current->nsproxy->ipc_ns; |
| 1245 | |
| 1246 | switch (cmd) { |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1247 | case IPC_INFO: { |
| 1248 | struct shminfo64 shminfo; |
| 1249 | err = shmctl_ipc_info(ns, &shminfo); |
| 1250 | if (err < 0) |
| 1251 | return err; |
| 1252 | if (copy_shminfo_to_user(buf, &shminfo, version)) |
| 1253 | err = -EFAULT; |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1254 | return err; |
Davidlohr Bueso | 2caacaa | 2013-09-11 14:26:21 -0700 | [diff] [blame] | 1255 | } |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1256 | case SHM_INFO: { |
| 1257 | struct shm_info shm_info; |
| 1258 | err = shmctl_shm_info(ns, &shm_info); |
| 1259 | if (err < 0) |
| 1260 | return err; |
| 1261 | if (copy_to_user(buf, &shm_info, sizeof(shm_info))) |
| 1262 | err = -EFAULT; |
| 1263 | return err; |
| 1264 | } |
| 1265 | case SHM_STAT: |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1266 | case SHM_STAT_ANY: |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1267 | case IPC_STAT: { |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1268 | err = shmctl_stat(ns, shmid, cmd, &sem64); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1269 | if (err < 0) |
| 1270 | return err; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1271 | if (copy_shmid_to_user(buf, &sem64, version)) |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1272 | err = -EFAULT; |
| 1273 | return err; |
| 1274 | } |
| 1275 | case IPC_SET: |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1276 | if (copy_shmid_from_user(&sem64, buf, version)) |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1277 | return -EFAULT; |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 1278 | fallthrough; |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1279 | case IPC_RMID: |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1280 | return shmctl_down(ns, shmid, cmd, &sem64); |
Al Viro | 9ba720c | 2017-07-08 20:58:06 -0400 | [diff] [blame] | 1281 | case SHM_LOCK: |
| 1282 | case SHM_UNLOCK: |
| 1283 | return shmctl_do_lock(ns, shmid, cmd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | default: |
Pierre Peiffer | 8d4cc8b | 2008-04-29 01:00:47 -0700 | [diff] [blame] | 1285 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1287 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1289 | SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) |
| 1290 | { |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1291 | return ksys_shmctl(shmid, cmd, buf, IPC_64); |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1292 | } |
| 1293 | |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1294 | #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION |
| 1295 | long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf) |
| 1296 | { |
| 1297 | int version = ipc_parse_version(&cmd); |
| 1298 | |
| 1299 | return ksys_shmctl(shmid, cmd, buf, version); |
| 1300 | } |
| 1301 | |
| 1302 | SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) |
| 1303 | { |
| 1304 | return ksys_old_shmctl(shmid, cmd, buf); |
| 1305 | } |
| 1306 | #endif |
| 1307 | |
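/*
 * Illustration, not part of this file: on architectures that set
 * CONFIG_ARCH_WANT_IPC_PARSE_VERSION, old-style callers select the ABI
 * by OR-ing the IPC_64 flag into cmd, and ipc_parse_version() strips it
 * to tell the copy_shmid_*_user() helpers which layout to use. Roughly,
 * paraphrasing the helper in ipc/util.c:
 */
#if 0	/* illustration only */
static int example_parse_version(int *cmd)
{
	if (*cmd & IPC_64) {		/* new shmid64_ds layout requested */
		*cmd &= ~IPC_64;
		return IPC_64;
	}
	return IPC_OLD;			/* legacy shmid_ds layout */
}
#endif
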
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1308 | #ifdef CONFIG_COMPAT |
| 1309 | |
| 1310 | struct compat_shmid_ds { |
| 1311 | struct compat_ipc_perm shm_perm; |
| 1312 | int shm_segsz; |
Arnd Bergmann | 9afc5ee | 2018-07-13 12:52:28 +0200 | [diff] [blame] | 1313 | old_time32_t shm_atime; |
| 1314 | old_time32_t shm_dtime; |
| 1315 | old_time32_t shm_ctime; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1316 | compat_ipc_pid_t shm_cpid; |
| 1317 | compat_ipc_pid_t shm_lpid; |
| 1318 | unsigned short shm_nattch; |
| 1319 | unsigned short shm_unused; |
| 1320 | compat_uptr_t shm_unused2; |
| 1321 | compat_uptr_t shm_unused3; |
| 1322 | }; |
| 1323 | |
| 1324 | struct compat_shminfo64 { |
| 1325 | compat_ulong_t shmmax; |
| 1326 | compat_ulong_t shmmin; |
| 1327 | compat_ulong_t shmmni; |
| 1328 | compat_ulong_t shmseg; |
| 1329 | compat_ulong_t shmall; |
| 1330 | compat_ulong_t __unused1; |
| 1331 | compat_ulong_t __unused2; |
| 1332 | compat_ulong_t __unused3; |
| 1333 | compat_ulong_t __unused4; |
| 1334 | }; |
| 1335 | |
| 1336 | struct compat_shm_info { |
| 1337 | compat_int_t used_ids; |
| 1338 | compat_ulong_t shm_tot, shm_rss, shm_swp; |
| 1339 | compat_ulong_t swap_attempts, swap_successes; |
| 1340 | }; |
| 1341 | |
| 1342 | static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in, |
| 1343 | int version) |
| 1344 | { |
| 1345 | if (in->shmmax > INT_MAX) |
| 1346 | in->shmmax = INT_MAX; |
| 1347 | if (version == IPC_64) { |
| 1348 | struct compat_shminfo64 info; |
| 1349 | memset(&info, 0, sizeof(info)); |
| 1350 | info.shmmax = in->shmmax; |
| 1351 | info.shmmin = in->shmmin; |
| 1352 | info.shmmni = in->shmmni; |
| 1353 | info.shmseg = in->shmseg; |
| 1354 | info.shmall = in->shmall; |
| 1355 | return copy_to_user(buf, &info, sizeof(info)); |
| 1356 | } else { |
| 1357 | struct shminfo info; |
| 1358 | memset(&info, 0, sizeof(info)); |
| 1359 | info.shmmax = in->shmmax; |
| 1360 | info.shmmin = in->shmmin; |
| 1361 | info.shmmni = in->shmmni; |
| 1362 | info.shmseg = in->shmseg; |
| 1363 | info.shmall = in->shmall; |
| 1364 | return copy_to_user(buf, &info, sizeof(info)); |
| 1365 | } |
| 1366 | } |
| 1367 | |
| 1368 | static int put_compat_shm_info(struct shm_info *ip, |
| 1369 | struct compat_shm_info __user *uip) |
| 1370 | { |
| 1371 | struct compat_shm_info info; |
| 1372 | |
| 1373 | memset(&info, 0, sizeof(info)); |
| 1374 | info.used_ids = ip->used_ids; |
| 1375 | info.shm_tot = ip->shm_tot; |
| 1376 | info.shm_rss = ip->shm_rss; |
| 1377 | info.shm_swp = ip->shm_swp; |
| 1378 | info.swap_attempts = ip->swap_attempts; |
| 1379 | info.swap_successes = ip->swap_successes; |
Al Viro | b776e4b | 2017-09-25 20:38:45 -0400 | [diff] [blame] | 1380 | return copy_to_user(uip, &info, sizeof(info)); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1381 | } |
| 1382 | |
| 1383 | static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, |
| 1384 | int version) |
| 1385 | { |
| 1386 | if (version == IPC_64) { |
| 1387 | struct compat_shmid64_ds v; |
| 1388 | memset(&v, 0, sizeof(v)); |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1389 | to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm); |
Arnd Bergmann | c2ab975 | 2015-04-28 21:39:50 +0200 | [diff] [blame] | 1390 | v.shm_atime = lower_32_bits(in->shm_atime); |
| 1391 | v.shm_atime_high = upper_32_bits(in->shm_atime); |
| 1392 | v.shm_dtime = lower_32_bits(in->shm_dtime); |
| 1393 | v.shm_dtime_high = upper_32_bits(in->shm_dtime); |
| 1394 | v.shm_ctime = lower_32_bits(in->shm_ctime); |
| 1395 | v.shm_ctime_high = upper_32_bits(in->shm_ctime); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1396 | v.shm_segsz = in->shm_segsz; |
| 1397 | v.shm_nattch = in->shm_nattch; |
| 1398 | v.shm_cpid = in->shm_cpid; |
| 1399 | v.shm_lpid = in->shm_lpid; |
| 1400 | return copy_to_user(buf, &v, sizeof(v)); |
| 1401 | } else { |
| 1402 | struct compat_shmid_ds v; |
| 1403 | memset(&v, 0, sizeof(v)); |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1404 | to_compat_ipc_perm(&v.shm_perm, &in->shm_perm); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1405 | v.shm_perm.key = in->shm_perm.key; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1406 | v.shm_atime = in->shm_atime; |
| 1407 | v.shm_dtime = in->shm_dtime; |
| 1408 | v.shm_ctime = in->shm_ctime; |
| 1409 | v.shm_segsz = in->shm_segsz; |
| 1410 | v.shm_nattch = in->shm_nattch; |
| 1411 | v.shm_cpid = in->shm_cpid; |
| 1412 | v.shm_lpid = in->shm_lpid; |
| 1413 | return copy_to_user(buf, &v, sizeof(v)); |
| 1414 | } |
| 1415 | } |
| 1416 | |
| 1417 | static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf, |
| 1418 | int version) |
| 1419 | { |
| 1420 | memset(out, 0, sizeof(*out)); |
| 1421 | if (version == IPC_64) { |
Linus Torvalds | 6aa211e | 2017-09-25 18:37:28 -0700 | [diff] [blame] | 1422 | struct compat_shmid64_ds __user *p = buf; |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1423 | return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1424 | } else { |
Linus Torvalds | 6aa211e | 2017-09-25 18:37:28 -0700 | [diff] [blame] | 1425 | struct compat_shmid_ds __user *p = buf; |
Al Viro | 28327fa | 2017-07-09 10:10:32 -0400 | [diff] [blame] | 1426 | return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1427 | } |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1428 | } |
| 1429 | |
Jason Yan | 1cd377b | 2020-04-06 20:12:56 -0700 | [diff] [blame] | 1430 | static long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr, int version) |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1431 | { |
| 1432 | struct ipc_namespace *ns; |
| 1433 | struct shmid64_ds sem64; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1434 | int err; |
| 1435 | |
| 1436 | ns = current->nsproxy->ipc_ns; |
| 1437 | |
| 1438 | if (cmd < 0 || shmid < 0) |
| 1439 | return -EINVAL; |
| 1440 | |
| 1441 | switch (cmd) { |
| 1442 | case IPC_INFO: { |
| 1443 | struct shminfo64 shminfo; |
| 1444 | err = shmctl_ipc_info(ns, &shminfo); |
| 1445 | if (err < 0) |
| 1446 | return err; |
| 1447 | if (copy_compat_shminfo_to_user(uptr, &shminfo, version)) |
| 1448 | err = -EFAULT; |
| 1449 | return err; |
| 1450 | } |
| 1451 | case SHM_INFO: { |
| 1452 | struct shm_info shm_info; |
| 1453 | err = shmctl_shm_info(ns, &shm_info); |
| 1454 | if (err < 0) |
| 1455 | return err; |
| 1456 | if (put_compat_shm_info(&shm_info, uptr)) |
| 1457 | err = -EFAULT; |
| 1458 | return err; |
| 1459 | } |
| 1460 | case IPC_STAT: |
Davidlohr Bueso | c21a697 | 2018-04-10 16:35:23 -0700 | [diff] [blame] | 1461 | case SHM_STAT_ANY: |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1462 | case SHM_STAT: |
| 1463 | err = shmctl_stat(ns, shmid, cmd, &sem64); |
| 1464 | if (err < 0) |
| 1465 | return err; |
Will Deacon | 58aff0a | 2017-09-18 17:47:38 +0100 | [diff] [blame] | 1466 | if (copy_compat_shmid_to_user(uptr, &sem64, version)) |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1467 | err = -EFAULT; |
| 1468 | return err; |
| 1469 | |
| 1470 | case IPC_SET: |
| 1471 | if (copy_compat_shmid_from_user(&sem64, uptr, version)) |
| 1472 | return -EFAULT; |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 1473 | fallthrough; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1474 | case IPC_RMID: |
| 1475 | return shmctl_down(ns, shmid, cmd, &sem64); |
| 1476 | case SHM_LOCK: |
| 1477 | case SHM_UNLOCK: |
| 1478 | return shmctl_do_lock(ns, shmid, cmd); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1479 | default: |
| 1480 | return -EINVAL; |
| 1481 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | return err; |
| 1483 | } |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1484 | |
| 1485 | COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr) |
| 1486 | { |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1487 | return compat_ksys_shmctl(shmid, cmd, uptr, IPC_64); |
Dominik Brodowski | c84d079 | 2018-03-20 20:12:33 +0100 | [diff] [blame] | 1488 | } |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 1489 | |
| 1490 | #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION |
| 1491 | long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr) |
| 1492 | { |
| 1493 | int version = compat_ipc_parse_version(&cmd); |
| 1494 | |
| 1495 | return compat_ksys_shmctl(shmid, cmd, uptr, version); |
| 1496 | } |
| 1497 | |
| 1498 | COMPAT_SYSCALL_DEFINE3(old_shmctl, int, shmid, int, cmd, void __user *, uptr) |
| 1499 | { |
| 1500 | return compat_ksys_old_shmctl(shmid, cmd, uptr); |
| 1501 | } |
| 1502 | #endif |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 1503 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | |
| 1505 | /* |
| 1506 | * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. |
| 1507 | * |
| 1508 | * NOTE! Despite the name, this is NOT a direct system call entrypoint. The |
| 1509 | * "raddr" thing points to kernel space, and there has to be a wrapper around |
| 1510 | * this. |
| 1511 | */ |
Davidlohr Bueso | 95e91b8 | 2017-02-27 14:28:24 -0800 | [diff] [blame] | 1512 | long do_shmat(int shmid, char __user *shmaddr, int shmflg, |
| 1513 | ulong *raddr, unsigned long shmlba) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | { |
| 1515 | struct shmid_kernel *shp; |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1516 | unsigned long addr = (unsigned long)shmaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | unsigned long size; |
Al Viro | 4f089ac | 2018-06-17 12:24:00 -0400 | [diff] [blame] | 1518 | struct file *file, *base; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1519 | int err; |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1520 | unsigned long flags = MAP_SHARED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1521 | unsigned long prot; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | int acc_mode; |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 1523 | struct ipc_namespace *ns; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1524 | struct shm_file_data *sfd; |
Al Viro | c9c554f | 2018-07-11 14:19:04 -0400 | [diff] [blame] | 1525 | int f_flags; |
Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1526 | unsigned long populate = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1528 | err = -EINVAL; |
| 1529 | if (shmid < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 | goto out; |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1531 | |
| 1532 | if (addr) { |
Will Deacon | 079a96a | 2012-07-30 14:42:38 -0700 | [diff] [blame] | 1533 | if (addr & (shmlba - 1)) { |
Davidlohr Bueso | 8f89c00 | 2018-05-25 14:47:30 -0700 | [diff] [blame] | 1534 | if (shmflg & SHM_RND) { |
Davidlohr Bueso | a73ab24 | 2018-05-25 14:47:27 -0700 | [diff] [blame] | 1535 | addr &= ~(shmlba - 1); /* round down */ |
Davidlohr Bueso | 8f89c00 | 2018-05-25 14:47:30 -0700 | [diff] [blame] | 1536 | |
| 1537 | /* |
| 1538 | 				 * Ensure that the round-down is non-zero |
| 1539 | * when remapping. This can happen for |
| 1540 | * cases when addr < shmlba. |
| 1541 | */ |
| 1542 | if (!addr && (shmflg & SHM_REMAP)) |
| 1543 | goto out; |
| 1544 | } else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | #ifndef __ARCH_FORCE_SHMLBA |
| 1546 | if (addr & ~PAGE_MASK) |
| 1547 | #endif |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1548 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 | |
Davidlohr Bueso | f0cb880 | 2017-05-08 15:57:03 -0700 | [diff] [blame] | 1551 | flags |= MAP_FIXED; |
| 1552 | } else if ((shmflg & SHM_REMAP)) |
| 1553 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | |
| 1555 | if (shmflg & SHM_RDONLY) { |
| 1556 | prot = PROT_READ; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1557 | acc_mode = S_IRUGO; |
Al Viro | c9c554f | 2018-07-11 14:19:04 -0400 | [diff] [blame] | 1558 | f_flags = O_RDONLY; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1559 | } else { |
| 1560 | prot = PROT_READ | PROT_WRITE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1561 | acc_mode = S_IRUGO | S_IWUGO; |
Al Viro | c9c554f | 2018-07-11 14:19:04 -0400 | [diff] [blame] | 1562 | f_flags = O_RDWR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | } |
| 1564 | if (shmflg & SHM_EXEC) { |
| 1565 | prot |= PROT_EXEC; |
| 1566 | acc_mode |= S_IXUGO; |
| 1567 | } |
| 1568 | |
| 1569 | /* |
| 1570 | * We cannot rely on the fs check since SYSV IPC does have an |
| 1571 | * additional creator id... |
| 1572 | */ |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 1573 | ns = current->nsproxy->ipc_ns; |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1574 | rcu_read_lock(); |
| 1575 | shp = shm_obtain_object_check(ns, shmid); |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 1576 | if (IS_ERR(shp)) { |
| 1577 | err = PTR_ERR(shp); |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1578 | goto out_unlock; |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 1579 | } |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1580 | |
| 1581 | err = -EACCES; |
Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 1582 | if (ipcperms(ns, &shp->shm_perm, acc_mode)) |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1583 | goto out_unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1584 | |
Eric W. Biederman | 7191adf | 2018-03-22 21:08:27 -0500 | [diff] [blame] | 1585 | err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg); |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1586 | if (err) |
| 1587 | goto out_unlock; |
| 1588 | |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1589 | ipc_lock_object(&shp->shm_perm); |
Greg Thelen | a399b29 | 2013-11-21 14:32:00 -0800 | [diff] [blame] | 1590 | |
| 1591 | /* check if shm_destroy() is tearing down shp */ |
Rafael Aquini | 0f3d2b0 | 2014-01-27 17:07:01 -0800 | [diff] [blame] | 1592 | if (!ipc_valid_object(&shp->shm_perm)) { |
Greg Thelen | a399b29 | 2013-11-21 14:32:00 -0800 | [diff] [blame] | 1593 | ipc_unlock_object(&shp->shm_perm); |
| 1594 | err = -EIDRM; |
| 1595 | goto out_unlock; |
| 1596 | } |
| 1597 | |
Eric Biggers | 3f05317 | 2018-04-13 15:35:30 -0700 | [diff] [blame] | 1598 | /* |
| 1599 | * We need to take a reference to the real shm file to prevent the |
| 1600 | * pointer from becoming stale in cases where the lifetime of the outer |
| 1601 | * file extends beyond that of the shm segment. It's not usually |
| 1602 | * possible, but it can happen during remap_file_pages() emulation as |
| 1603 | * that unmaps the memory, then does ->mmap() via file reference only. |
| 1604 | * We'll deny the ->mmap() if the shm segment was since removed, but to |
| 1605 | * detect shm ID reuse we need to compare the file pointers. |
| 1606 | */ |
Al Viro | 4f089ac | 2018-06-17 12:24:00 -0400 | [diff] [blame] | 1607 | base = get_file(shp->shm_file); |
| 1608 | shp->shm_nattch++; |
| 1609 | size = i_size_read(file_inode(base)); |
| 1610 | ipc_unlock_object(&shp->shm_perm); |
| 1611 | rcu_read_unlock(); |
| 1612 | |
| 1613 | err = -ENOMEM; |
| 1614 | sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); |
| 1615 | if (!sfd) { |
| 1616 | fput(base); |
| 1617 | goto out_nattch; |
| 1618 | } |
| 1619 | |
| 1620 | file = alloc_file_clone(base, f_flags, |
| 1621 | is_file_hugepages(base) ? |
| 1622 | &shm_file_operations_huge : |
| 1623 | &shm_file_operations); |
| 1624 | err = PTR_ERR(file); |
| 1625 | if (IS_ERR(file)) { |
| 1626 | kfree(sfd); |
| 1627 | fput(base); |
| 1628 | goto out_nattch; |
| 1629 | } |
| 1630 | |
| 1631 | sfd->id = shp->shm_perm.id; |
| 1632 | sfd->ns = get_ipc_ns(ns); |
| 1633 | sfd->file = base; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1634 | sfd->vm_ops = NULL; |
Al Viro | 4f089ac | 2018-06-17 12:24:00 -0400 | [diff] [blame] | 1635 | file->private_data = sfd; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1636 | |
Al Viro | 8b3ec68 | 2012-05-30 17:11:23 -0400 | [diff] [blame] | 1637 | err = security_mmap_file(file, prot, flags); |
| 1638 | if (err) |
| 1639 | goto out_fput; |
| 1640 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1641 | if (mmap_write_lock_killable(current->mm)) { |
Michal Hocko | 91f4f94 | 2016-05-23 16:25:51 -0700 | [diff] [blame] | 1642 | err = -EINTR; |
| 1643 | goto out_fput; |
| 1644 | } |
| 1645 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | if (addr && !(shmflg & SHM_REMAP)) { |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1647 | err = -EINVAL; |
Manfred Spraul | 247a8ce | 2014-06-06 14:37:38 -0700 | [diff] [blame] | 1648 | if (addr + size < addr) |
| 1649 | goto invalid; |
| 1650 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | if (find_vma_intersection(current->mm, addr, addr + size)) |
| 1652 | goto invalid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1653 | } |
Davidlohr Bueso | f42569b | 2013-09-11 14:26:22 -0700 | [diff] [blame] | 1654 | |
Peter Collingbourne | 45e5530 | 2020-08-06 23:23:37 -0700 | [diff] [blame] | 1655 | addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL); |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1656 | *raddr = addr; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1657 | err = 0; |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1658 | if (IS_ERR_VALUE(addr)) |
| 1659 | err = (long)addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1660 | invalid: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1661 | mmap_write_unlock(current->mm); |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1662 | if (populate) |
Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1663 | mm_populate(addr, populate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1664 | |
Al Viro | 8b3ec68 | 2012-05-30 17:11:23 -0400 | [diff] [blame] | 1665 | out_fput: |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1666 | fput(file); |
| 1667 | |
| 1668 | out_nattch: |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1669 | down_write(&shm_ids(ns).rwsem); |
Nadia Derbey | 00c2bf8 | 2008-07-25 01:48:03 -0700 | [diff] [blame] | 1670 | shp = shm_lock(ns, shmid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | shp->shm_nattch--; |
Alexander Mikhalitsyn | a15261d | 2021-11-19 16:43:21 -0800 | [diff] [blame] | 1672 | |
| 1673 | if (shm_may_destroy(shp)) |
Kirill Korotaev | 4e98231 | 2006-10-02 02:18:22 -0700 | [diff] [blame] | 1674 | shm_destroy(ns, shp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | else |
| 1676 | shm_unlock(shp); |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 1677 | up_write(&shm_ids(ns).rwsem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 | return err; |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1679 | |
| 1680 | out_unlock: |
Davidlohr Bueso | c2c737a | 2013-09-11 14:26:23 -0700 | [diff] [blame] | 1681 | rcu_read_unlock(); |
Davidlohr Bueso | f42569b | 2013-09-11 14:26:22 -0700 | [diff] [blame] | 1682 | out: |
| 1683 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1684 | } |
| 1685 | |
Heiko Carstens | d5460c9 | 2009-01-14 14:14:27 +0100 | [diff] [blame] | 1686 | SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) |
Stephen Rothwell | 7d87e14c | 2005-05-01 08:59:12 -0700 | [diff] [blame] | 1687 | { |
| 1688 | unsigned long ret; |
| 1689 | long err; |
| 1690 | |
Will Deacon | 079a96a | 2012-07-30 14:42:38 -0700 | [diff] [blame] | 1691 | err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); |
Stephen Rothwell | 7d87e14c | 2005-05-01 08:59:12 -0700 | [diff] [blame] | 1692 | if (err) |
| 1693 | return err; |
| 1694 | force_successful_syscall_return(); |
| 1695 | return (long)ret; |
| 1696 | } |
| 1697 | |
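/*
 * Illustration, not part of this file: a minimal attach/use/detach
 * round trip over the wrappers above. With shmaddr == NULL the kernel
 * chooses the address in do_mmap(); passing SHM_RDONLY instead maps
 * PROT_READ only. Assumes the segment is at least one page; error
 * handling is abbreviated.
 */
#if 0	/* illustration only */
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static void example_attach(int shmid)
{
	void *p = shmat(shmid, NULL, 0);	/* read/write attach */

	if (p != (void *)-1) {
		memset(p, 0, 4096);	/* backed by the shm segment */
		shmdt(p);		/* drops shm_nattch, see shm_close() */
	}
}
#endif
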
Al Viro | a78ee9e | 2017-07-09 10:38:28 -0400 | [diff] [blame] | 1698 | #ifdef CONFIG_COMPAT |
| 1699 | |
| 1700 | #ifndef COMPAT_SHMLBA |
| 1701 | #define COMPAT_SHMLBA SHMLBA |
| 1702 | #endif |
| 1703 | |
| 1704 | COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg) |
| 1705 | { |
| 1706 | unsigned long ret; |
| 1707 | long err; |
| 1708 | |
| 1709 | err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA); |
| 1710 | if (err) |
| 1711 | return err; |
| 1712 | force_successful_syscall_return(); |
| 1713 | return (long)ret; |
| 1714 | } |
| 1715 | #endif |
| 1716 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 | /* |
| 1718 | * detach and kill segment if marked destroyed. |
| 1719 | * The work is done in shm_close. |
| 1720 | */ |
Dominik Brodowski | da1e2744 | 2018-03-20 20:09:48 +0100 | [diff] [blame] | 1721 | long ksys_shmdt(char __user *shmaddr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1722 | { |
| 1723 | struct mm_struct *mm = current->mm; |
Mike Frysinger | 586c7e6 | 2009-06-09 16:26:23 -0700 | [diff] [blame] | 1724 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | unsigned long addr = (unsigned long)shmaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1726 | int retval = -EINVAL; |
Mike Frysinger | 586c7e6 | 2009-06-09 16:26:23 -0700 | [diff] [blame] | 1727 | #ifdef CONFIG_MMU |
| 1728 | loff_t size = 0; |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1729 | struct file *file; |
Mike Frysinger | 586c7e6 | 2009-06-09 16:26:23 -0700 | [diff] [blame] | 1730 | struct vm_area_struct *next; |
| 1731 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 | |
Hugh Dickins | df1e2fb | 2006-03-24 03:18:06 -0800 | [diff] [blame] | 1733 | if (addr & ~PAGE_MASK) |
| 1734 | return retval; |
| 1735 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1736 | if (mmap_write_lock_killable(mm)) |
Michal Hocko | 91f4f94 | 2016-05-23 16:25:51 -0700 | [diff] [blame] | 1737 | return -EINTR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | |
| 1739 | /* |
| 1740 | * This function tries to be smart and unmap shm segments that |
| 1741 | * were modified by partial mlock or munmap calls: |
| 1742 | * - It first determines the size of the shm segment that should be |
| 1743 | * unmapped: It searches for a vma that is backed by shm and that |
| 1744 | 	 *   unmapped: It searches for a vma that is backed by shm and that |
| 1745 | 	 *   started at address shmaddr. It records its size and then unmaps |
| 1746 | * - Then it unmaps all shm vmas that started at shmaddr and that |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1747 | * are within the initially determined size and that are from the |
| 1748 | * same shm segment from which we determined the size. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1749 | * Errors from do_munmap are ignored: the function only fails if |
| 1750 | * it's called with invalid parameters or if it's called to unmap |
| 1751 | * a part of a vma. Both calls in this function are for full vmas, |
| 1752 | * the parameters are directly copied from the vma itself and always |
| 1753 | * valid - therefore do_munmap cannot fail. (famous last words?) |
| 1754 | */ |
| 1755 | /* |
| 1756 | * If it had been mremap()'d, the starting address would not |
| 1757 | * match the usual checks anyway. So assume all vma's are |
| 1758 | * above the starting address given. |
| 1759 | */ |
| 1760 | vma = find_vma(mm, addr); |
| 1761 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1762 | #ifdef CONFIG_MMU |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | while (vma) { |
| 1764 | next = vma->vm_next; |
| 1765 | |
| 1766 | /* |
| 1767 | * Check if the starting address would match, i.e. it's |
| 1768 | 		 * a fragment created by mprotect() and/or munmap(), or |
| 1769 | * otherwise it starts at this address with no hassles. |
| 1770 | */ |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1771 | if ((vma->vm_ops == &shm_vm_ops) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { |
| 1773 | |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1774 | /* |
| 1775 | * Record the file of the shm segment being |
| 1776 | * unmapped. With mremap(), someone could place |
| 1777 | * page from another segment but with equal offsets |
| 1778 | * in the range we are unmapping. |
| 1779 | */ |
| 1780 | file = vma->vm_file; |
Dave Hansen | 07a46ed | 2014-12-12 16:58:22 -0800 | [diff] [blame] | 1781 | size = i_size_read(file_inode(vma->vm_file)); |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1782 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 | /* |
| 1784 | * We discovered the size of the shm segment, so |
| 1785 | * break out of here and fall through to the next |
| 1786 | * loop that uses the size information to stop |
| 1787 | * searching for matching vma's. |
| 1788 | */ |
| 1789 | retval = 0; |
| 1790 | vma = next; |
| 1791 | break; |
| 1792 | } |
| 1793 | vma = next; |
| 1794 | } |
| 1795 | |
| 1796 | /* |
| 1797 | * We need look no further than the maximum address a fragment |
| 1798 | * could possibly have landed at. Also cast things to loff_t to |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1799 | * prevent overflows and make comparisons vs. equal-width types. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | */ |
KAMEZAWA Hiroyuki | 8e36709 | 2006-02-10 01:51:12 -0800 | [diff] [blame] | 1801 | size = PAGE_ALIGN(size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 | while (vma && (loff_t)(vma->vm_end - addr) <= size) { |
| 1803 | next = vma->vm_next; |
| 1804 | |
| 1805 | /* finding a matching vma now does not alter retval */ |
Eric W. Biederman | bc56bba | 2007-02-20 13:57:53 -0800 | [diff] [blame] | 1806 | if ((vma->vm_ops == &shm_vm_ops) && |
Dave Hansen | d3c9790 | 2014-12-12 16:58:19 -0800 | [diff] [blame] | 1807 | ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && |
| 1808 | (vma->vm_file == file)) |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1809 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1810 | vma = next; |
| 1811 | } |
| 1812 | |
Shailesh Pandey | 63980c8 | 2016-12-14 15:06:10 -0800 | [diff] [blame] | 1813 | #else /* CONFIG_MMU */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1814 | /* under NOMMU conditions, the exact address to be destroyed must be |
Shailesh Pandey | 63980c8 | 2016-12-14 15:06:10 -0800 | [diff] [blame] | 1815 | * given |
| 1816 | */ |
Davidlohr Bueso | 530fcd16 | 2013-09-11 14:26:28 -0700 | [diff] [blame] | 1817 | if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { |
Mike Rapoport | 897ab3e | 2017-02-24 14:58:22 -0800 | [diff] [blame] | 1818 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1819 | retval = 0; |
| 1820 | } |
| 1821 | |
| 1822 | #endif |
| 1823 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1824 | mmap_write_unlock(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1825 | return retval; |
| 1826 | } |
| 1827 | |
Dominik Brodowski | da1e2744 | 2018-03-20 20:09:48 +0100 | [diff] [blame] | 1828 | SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) |
| 1829 | { |
| 1830 | return ksys_shmdt(shmaddr); |
| 1831 | } |
| 1832 | |
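/*
 * Illustration, not part of this file: the fragmented-vma case the two
 * loops in ksys_shmdt() exist for. A partial munmap() inside an
 * attached segment leaves several vmas sharing one vm_file; the first
 * loop finds the fragment whose start matches its pgoff and records the
 * segment size, the second sweeps the remaining fragments. Sketch only.
 */
#if 0	/* illustration only */
#include <sys/shm.h>
#include <sys/mman.h>

static void example_fragmented_detach(int shmid)
{
	char *p = shmat(shmid, NULL, 0);	/* say the segment is 3 pages */

	munmap(p + 4096, 4096);	/* punch a hole: two vmas remain */
	shmdt(p);		/* still unmaps both fragments */
}
#endif
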
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1833 | #ifdef CONFIG_PROC_FS |
Mike Waychison | 19b4946 | 2005-09-06 15:17:10 -0700 | [diff] [blame] | 1834 | static int sysvipc_shm_proc_show(struct seq_file *s, void *it) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | { |
Eric W. Biederman | 98f929b | 2018-03-23 00:29:57 -0500 | [diff] [blame] | 1836 | struct pid_namespace *pid_ns = ipc_seq_pid_ns(s); |
Eric W. Biederman | 1efdb69 | 2012-02-07 16:54:11 -0800 | [diff] [blame] | 1837 | struct user_namespace *user_ns = seq_user_ns(s); |
Kees Cook | ade9f91 | 2017-08-02 13:32:21 -0700 | [diff] [blame] | 1838 | struct kern_ipc_perm *ipcp = it; |
| 1839 | struct shmid_kernel *shp; |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 1840 | unsigned long rss = 0, swp = 0; |
| 1841 | |
Kees Cook | ade9f91 | 2017-08-02 13:32:21 -0700 | [diff] [blame] | 1842 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); |
Helge Deller | b795218 | 2010-10-27 15:34:16 -0700 | [diff] [blame] | 1843 | shm_add_rss_swap(shp, &rss, &swp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1844 | |
Paul Menage | 6c82681 | 2008-06-12 15:21:49 -0700 | [diff] [blame] | 1845 | #if BITS_PER_LONG <= 32 |
| 1846 | #define SIZE_SPEC "%10lu" |
| 1847 | #else |
| 1848 | #define SIZE_SPEC "%21lu" |
| 1849 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1850 | |
Joe Perches | 7f032d6 | 2015-04-15 16:17:54 -0700 | [diff] [blame] | 1851 | seq_printf(s, |
| 1852 | "%10d %10d %4o " SIZE_SPEC " %5u %5u " |
Deepa Dinamani | 7ff2819 | 2017-08-02 19:51:14 -0700 | [diff] [blame] | 1853 | "%5lu %5u %5u %5u %5u %10llu %10llu %10llu " |
Joe Perches | 7f032d6 | 2015-04-15 16:17:54 -0700 | [diff] [blame] | 1854 | SIZE_SPEC " " SIZE_SPEC "\n", |
| 1855 | shp->shm_perm.key, |
| 1856 | shp->shm_perm.id, |
| 1857 | shp->shm_perm.mode, |
| 1858 | shp->shm_segsz, |
Eric W. Biederman | 98f929b | 2018-03-23 00:29:57 -0500 | [diff] [blame] | 1859 | pid_nr_ns(shp->shm_cprid, pid_ns), |
| 1860 | pid_nr_ns(shp->shm_lprid, pid_ns), |
Joe Perches | 7f032d6 | 2015-04-15 16:17:54 -0700 | [diff] [blame] | 1861 | shp->shm_nattch, |
| 1862 | from_kuid_munged(user_ns, shp->shm_perm.uid), |
| 1863 | from_kgid_munged(user_ns, shp->shm_perm.gid), |
| 1864 | from_kuid_munged(user_ns, shp->shm_perm.cuid), |
| 1865 | from_kgid_munged(user_ns, shp->shm_perm.cgid), |
| 1866 | shp->shm_atim, |
| 1867 | shp->shm_dtim, |
| 1868 | shp->shm_ctim, |
| 1869 | rss * PAGE_SIZE, |
| 1870 | swp * PAGE_SIZE); |
| 1871 | |
| 1872 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | } |
| 1874 | #endif |