Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * linux/ipc/util.h |
| 4 | * Copyright (C) 1999 Christoph Rohland |
| 5 | * |
Christian Kujau | 624dffc | 2006-01-15 02:43:54 +0100 | [diff] [blame] | 6 | * ipc helper functions (c) 1999 Manfred Spraul <manfred@colorfullife.com> |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 7 | * namespaces support. 2006 OpenVZ, SWsoft Inc. |
| 8 | * Pavel Emelianov <xemul@openvz.org> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | */ |
| 10 | |
| 11 | #ifndef _IPC_UTIL_H |
| 12 | #define _IPC_UTIL_H |
| 13 | |
Johannes Weiner | 232086b | 2009-06-20 02:23:29 +0200 | [diff] [blame] | 14 | #include <linux/unistd.h> |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 15 | #include <linux/err.h> |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 16 | #include <linux/ipc_namespace.h> |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 17 | |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 18 | /* |
| 19 | * The IPC ID contains 2 separate numbers - index and sequence number. |
| 20 | * By default, |
| 21 | * bits 0-14: index (32k, 15 bits) |
| 22 | * bits 15-30: sequence number (64k, 16 bits) |
| 23 | * |
| 24 | * When IPCMNI extension mode is turned on, the composition changes: |
| 25 | * bits 0-23: index (16M, 24 bits) |
| 26 | * bits 24-30: sequence number (128, 7 bits) |
| 27 | */ |
| 28 | #define IPCMNI_SHIFT 15 |
| 29 | #define IPCMNI_EXTEND_SHIFT 24 |
Manfred Spraul | 99db46e | 2019-05-14 15:46:36 -0700 | [diff] [blame] | 30 | #define IPCMNI_EXTEND_MIN_CYCLE (RADIX_TREE_MAP_SIZE * RADIX_TREE_MAP_SIZE) |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 31 | #define IPCMNI (1 << IPCMNI_SHIFT) |
| 32 | #define IPCMNI_EXTEND (1 << IPCMNI_EXTEND_SHIFT) |
| 33 | |
| 34 | #ifdef CONFIG_SYSVIPC_SYSCTL |
| 35 | extern int ipc_mni; |
| 36 | extern int ipc_mni_shift; |
Manfred Spraul | 99db46e | 2019-05-14 15:46:36 -0700 | [diff] [blame] | 37 | extern int ipc_min_cycle; |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 38 | |
Manfred Spraul | 3278a2c | 2019-05-14 15:46:33 -0700 | [diff] [blame] | 39 | #define ipcmni_seq_shift() ipc_mni_shift |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 40 | #define IPCMNI_IDX_MASK ((1 << ipc_mni_shift) - 1) |
| 41 | |
| 42 | #else /* CONFIG_SYSVIPC_SYSCTL */ |
| 43 | |
| 44 | #define ipc_mni IPCMNI |
Manfred Spraul | 99db46e | 2019-05-14 15:46:36 -0700 | [diff] [blame] | 45 | #define ipc_min_cycle ((int)RADIX_TREE_MAP_SIZE) |
Manfred Spraul | 3278a2c | 2019-05-14 15:46:33 -0700 | [diff] [blame] | 46 | #define ipcmni_seq_shift() IPCMNI_SHIFT |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 47 | #define IPCMNI_IDX_MASK ((1 << IPCMNI_SHIFT) - 1) |
| 48 | #endif /* CONFIG_SYSVIPC_SYSCTL */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 49 | |
Davidlohr Bueso | eae04d2 | 2018-08-21 22:01:56 -0700 | [diff] [blame] | 50 | void sem_init(void); |
| 51 | void msg_init(void); |
Manfred Spraul | 239521f | 2014-01-27 17:07:04 -0800 | [diff] [blame] | 52 | void shm_init(void); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 53 | |
Pavel Emelyanov | ae5e1b2 | 2008-02-08 04:18:22 -0800 | [diff] [blame] | 54 | struct ipc_namespace; |
Eric W. Biederman | 03f1fc0 | 2018-03-23 00:22:05 -0500 | [diff] [blame] | 55 | struct pid_namespace; |
Pavel Emelyanov | ae5e1b2 | 2008-02-08 04:18:22 -0800 | [diff] [blame] | 56 | |
#ifdef CONFIG_POSIX_MQUEUE
/* mqueue per-namespace teardown helpers (implemented in ipc/mqueue.c). */
extern void mq_clear_sbinfo(struct ipc_namespace *ns);
extern void mq_put_mnt(struct ipc_namespace *ns);
#else
/* No-op stubs so callers need no #ifdefs when POSIX mqueues are compiled out. */
static inline void mq_clear_sbinfo(struct ipc_namespace *ns) { }
static inline void mq_put_mnt(struct ipc_namespace *ns) { }
#endif
| 64 | |
#ifdef CONFIG_SYSVIPC
/* Per-subsystem namespace setup hooks. */
void sem_init_ns(struct ipc_namespace *ns);
void msg_init_ns(struct ipc_namespace *ns);
void shm_init_ns(struct ipc_namespace *ns);

/* Per-subsystem namespace teardown hooks. */
void sem_exit_ns(struct ipc_namespace *ns);
void msg_exit_ns(struct ipc_namespace *ns);
void shm_exit_ns(struct ipc_namespace *ns);
#else
/* No-op stubs so callers need no #ifdefs when SYSV IPC is compiled out. */
static inline void sem_init_ns(struct ipc_namespace *ns) { }
static inline void msg_init_ns(struct ipc_namespace *ns) { }
static inline void shm_init_ns(struct ipc_namespace *ns) { }

static inline void sem_exit_ns(struct ipc_namespace *ns) { }
static inline void msg_exit_ns(struct ipc_namespace *ns) { }
static inline void shm_exit_ns(struct ipc_namespace *ns) { }
#endif
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 82 | |
/*
 * Structure that holds the parameters needed by the ipc operations
 * (see struct ipc_ops below).  Filled in by the sys_*get() entry points
 * and passed down to ipcget().
 */
struct ipc_params {
	key_t key;		/* IPC key, or IPC_PRIVATE */
	int flg;		/* *get() flags (IPC_CREAT, IPC_EXCL, mode, ...) */
	union {
		size_t size;	/* for shared memories */
		int nsems;	/* for semaphores */
	} u;			/* holds the getnew() specific param */
};
| 95 | |
/*
 * Structure that holds some ipc operations. This structure is used to unify
 * the calls to sys_msgget(), sys_semget(), sys_shmget()
 * . routine to call to create a new ipc object. Can be one of newque,
 *   newary, newseg
 * . routine to call to check permissions for a new ipc object.
 *   Can be one of security_msg_associate, security_sem_associate,
 *   security_shm_associate
 * . routine to call for an extra check if needed
 *
 * Parameter names are kept in the prototypes so readers do not have to
 * chase the implementations to learn what each hook receives.
 */
struct ipc_ops {
	int (*getnew)(struct ipc_namespace *ns, struct ipc_params *params);
	int (*associate)(struct kern_ipc_perm *ipcp, int flg);
	int (*more_checks)(struct kern_ipc_perm *ipcp,
			   struct ipc_params *params);
};
| 111 | |
Mike Waychison | ae78177 | 2005-09-06 15:17:09 -0700 | [diff] [blame] | 112 | struct seq_file; |
Pierre Peiffer | ed2ddbf | 2008-02-08 04:18:57 -0800 | [diff] [blame] | 113 | struct ipc_ids; |
Cedric Le Goater | 7d69a1f | 2007-07-15 23:40:58 -0700 | [diff] [blame] | 114 | |
Davidlohr Bueso | eae04d2 | 2018-08-21 22:01:56 -0700 | [diff] [blame] | 115 | void ipc_init_ids(struct ipc_ids *ids); |
Mike Waychison | ae78177 | 2005-09-06 15:17:09 -0700 | [diff] [blame] | 116 | #ifdef CONFIG_PROC_FS |
| 117 | void __init ipc_init_proc_interface(const char *path, const char *header, |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 118 | int ids, int (*show)(struct seq_file *, void *)); |
Eric W. Biederman | 03f1fc0 | 2018-03-23 00:22:05 -0500 | [diff] [blame] | 119 | struct pid_namespace *ipc_seq_pid_ns(struct seq_file *); |
Mike Waychison | ae78177 | 2005-09-06 15:17:09 -0700 | [diff] [blame] | 120 | #else |
| 121 | #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) |
| 122 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 123 | |
Kirill Korotaev | 73ea413 | 2006-10-02 02:18:20 -0700 | [diff] [blame] | 124 | #define IPC_SEM_IDS 0 |
| 125 | #define IPC_MSG_IDS 1 |
| 126 | #define IPC_SHM_IDS 2 |
| 127 | |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 128 | #define ipcid_to_idx(id) ((id) & IPCMNI_IDX_MASK) |
Manfred Spraul | 3278a2c | 2019-05-14 15:46:33 -0700 | [diff] [blame] | 129 | #define ipcid_to_seqx(id) ((id) >> ipcmni_seq_shift()) |
| 130 | #define ipcid_seq_max() (INT_MAX >> ipcmni_seq_shift()) |
Nadia Derbey | ce621f5 | 2007-10-18 23:40:52 -0700 | [diff] [blame] | 131 | |
Davidlohr Bueso | d9a605e | 2013-09-11 14:26:24 -0700 | [diff] [blame] | 132 | /* must be called with ids->rwsem acquired for writing */ |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 133 | int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); |
Nadia Derbey | 3e148c7 | 2007-10-18 23:40:54 -0700 | [diff] [blame] | 134 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 135 | /* must be called with both locks acquired. */ |
Nadia Derbey | 7ca7e56 | 2007-10-18 23:40:48 -0700 | [diff] [blame] | 136 | void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 137 | |
Guillaume Knispel | 0cfb6ae | 2017-09-08 16:17:55 -0700 | [diff] [blame] | 138 | /* must be called with both locks acquired. */ |
| 139 | void ipc_set_key_private(struct ipc_ids *, struct kern_ipc_perm *); |
| 140 | |
Nadia Derbey | f4566f0 | 2007-10-18 23:40:53 -0700 | [diff] [blame] | 141 | /* must be called with ipcp locked */ |
Serge E. Hallyn | b0e7759 | 2011-03-23 16:43:24 -0700 | [diff] [blame] | 142 | int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 143 | |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 144 | /** |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 145 | * ipc_get_maxidx - get the highest assigned index |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 146 | * @ids: ipc identifier set |
| 147 | * |
| 148 | * Called with ipc_ids.rwsem held for reading. |
| 149 | */ |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 150 | static inline int ipc_get_maxidx(struct ipc_ids *ids) |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 151 | { |
| 152 | if (ids->in_use == 0) |
| 153 | return -1; |
| 154 | |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 155 | if (ids->in_use == ipc_mni) |
| 156 | return ipc_mni - 1; |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 157 | |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 158 | return ids->max_idx; |
Davidlohr Bueso | 15df03c8 | 2017-11-17 15:31:18 -0800 | [diff] [blame] | 159 | } |
| 160 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 161 | /* |
| 162 | * For allocation that need to be freed by RCU. |
| 163 | * Objects are reference counted, they start with reference count 1. |
| 164 | * getref increases the refcount, the putref call that reduces the recount |
| 165 | * to 0 schedules the rcu destruction. Caller must guarantee locking. |
Manfred Spraul | 62b49c9 | 2017-07-12 14:35:34 -0700 | [diff] [blame] | 166 | * |
| 167 | * refcount is initialized by ipc_addid(), before that point call_rcu() |
| 168 | * must be used. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 169 | */ |
Manfred Spraul | 2a9d648 | 2018-08-21 22:02:04 -0700 | [diff] [blame] | 170 | bool ipc_rcu_getref(struct kern_ipc_perm *ptr); |
Manfred Spraul | dba4cdd | 2017-07-12 14:34:41 -0700 | [diff] [blame] | 171 | void ipc_rcu_putref(struct kern_ipc_perm *ptr, |
| 172 | void (*func)(struct rcu_head *head)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 173 | |
Davidlohr Bueso | 55b7ae5 | 2015-06-30 14:58:42 -0700 | [diff] [blame] | 174 | struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 175 | |
| 176 | void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); |
| 177 | void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); |
Eric W. Biederman | 1efdb69 | 2012-02-07 16:54:11 -0800 | [diff] [blame] | 178 | int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); |
Manfred Spraul | 4241c1a | 2018-08-21 22:01:34 -0700 | [diff] [blame] | 179 | struct kern_ipc_perm *ipcctl_obtain_check(struct ipc_namespace *ns, |
Davidlohr Bueso | 444d0f6 | 2013-04-30 19:15:24 -0700 | [diff] [blame] | 180 | struct ipc_ids *ids, int id, int cmd, |
| 181 | struct ipc64_perm *perm, int extra_perm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 182 | |
/*
 * Replace the struct pid reference at *pos with @pid, taking a new
 * reference on @pid and dropping the reference previously held.
 * A no-op when *pos already points at @pid.
 */
static inline void ipc_update_pid(struct pid **pos, struct pid *pid)
{
	struct pid *prev = *pos;

	if (prev == pid)
		return;

	*pos = get_pid(pid);
	put_pid(prev);
}
| 191 | |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 192 | #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION |
Manfred Spraul | 239521f | 2014-01-27 17:07:04 -0800 | [diff] [blame] | 193 | int ipc_parse_version(int *cmd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 194 | #endif |
| 195 | |
| 196 | extern void free_msg(struct msg_msg *msg); |
Mathias Krause | 4e9b45a | 2013-11-12 15:11:47 -0800 | [diff] [blame] | 197 | extern struct msg_msg *load_msg(const void __user *src, size_t len); |
Stanislav Kinsbursky | 4a674f3 | 2013-01-04 15:34:55 -0800 | [diff] [blame] | 198 | extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst); |
Mathias Krause | 4e9b45a | 2013-11-12 15:11:47 -0800 | [diff] [blame] | 199 | extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len); |
Nadia Derbey | 7748dbf | 2007-10-18 23:40:49 -0700 | [diff] [blame] | 200 | |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 201 | static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int id) |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 202 | { |
Manfred Spraul | 27c331a | 2018-08-21 22:02:00 -0700 | [diff] [blame] | 203 | return ipcid_to_seqx(id) != ipcp->seq; |
Nadia Derbey | 023a535 | 2007-10-18 23:40:51 -0700 | [diff] [blame] | 204 | } |
| 205 | |
/* Acquire the per-object spinlock of @perm. */
static inline void ipc_lock_object(struct kern_ipc_perm *perm)
{
	spin_lock(&perm->lock);
}
| 210 | |
/* Release the per-object spinlock of @perm. */
static inline void ipc_unlock_object(struct kern_ipc_perm *perm)
{
	spin_unlock(&perm->lock);
}
| 215 | |
/* Debug check: the caller must already hold @perm's spinlock. */
static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
{
	assert_spin_locked(&perm->lock);
}
| 220 | |
/*
 * Drop the object spinlock, then leave the RCU read-side critical
 * section entered when the object was looked up.  The order matters:
 * the lock lives inside the RCU-protected object.
 */
static inline void ipc_unlock(struct kern_ipc_perm *perm)
{
	ipc_unlock_object(perm);
	rcu_read_unlock();
}
| 226 | |
/*
 * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths
 * where the respective ipc_ids.rwsem is not being held down.
 * Checks whether the ipc object is still around or if it's gone already, as
 * ipc_rmid() may have already freed the ID while the ipc lock was spinning.
 * Needs to be called with kern_ipc_perm.lock held -- exception made for one
 * checkpoint case at sys_semtimedop() as noted in code commentary.
 */
static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
{
	/* ->deleted is set under the object lock before the ID is freed */
	return !perm->deleted;
}
| 239 | |
Davidlohr Bueso | 4d2bff5 | 2013-04-30 19:15:19 -0700 | [diff] [blame] | 240 | struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id); |
Pavel Emelyanov | b2d75cd | 2008-02-08 04:18:54 -0800 | [diff] [blame] | 241 | int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, |
Mathias Krause | eb66ec4 | 2014-06-06 14:37:36 -0700 | [diff] [blame] | 242 | const struct ipc_ops *ops, struct ipc_params *params); |
Alexey Dobriyan | 665c774 | 2009-06-17 16:27:57 -0700 | [diff] [blame] | 243 | void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, |
| 244 | void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)); |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 245 | |
Waiman Long | 8c81ddd | 2018-10-30 15:07:24 -0700 | [diff] [blame] | 246 | static inline int sem_check_semmni(struct ipc_namespace *ns) { |
| 247 | /* |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 248 | * Check semmni range [0, ipc_mni] |
Waiman Long | 8c81ddd | 2018-10-30 15:07:24 -0700 | [diff] [blame] | 249 | * semmni is the last element of sem_ctls[4] array |
| 250 | */ |
Waiman Long | 5ac893b | 2019-05-14 15:46:29 -0700 | [diff] [blame] | 251 | return ((ns->sem_ctls[3] < 0) || (ns->sem_ctls[3] > ipc_mni)) |
Waiman Long | 8c81ddd | 2018-10-30 15:07:24 -0700 | [diff] [blame] | 252 | ? -ERANGE : 0; |
| 253 | } |
| 254 | |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 255 | #ifdef CONFIG_COMPAT |
| 256 | #include <linux/compat.h> |
/*
 * 32-bit view of struct ipc_perm as seen by compat tasks.
 * NOTE(review): field order and sizes presumably mirror the legacy
 * 32-bit userspace ABI -- verify against the arch compat headers
 * before changing anything here.
 */
struct compat_ipc_perm {
	key_t key;
	__compat_uid_t uid;
	__compat_gid_t gid;
	__compat_uid_t cuid;
	__compat_gid_t cgid;
	compat_mode_t mode;
	unsigned short seq;
};
| 266 | |
Al Viro | c0ebccb | 2017-07-09 10:03:23 -0400 | [diff] [blame] | 267 | void to_compat_ipc_perm(struct compat_ipc_perm *, struct ipc64_perm *); |
| 268 | void to_compat_ipc64_perm(struct compat_ipc64_perm *, struct ipc64_perm *); |
| 269 | int get_compat_ipc_perm(struct ipc64_perm *, struct compat_ipc_perm __user *); |
| 270 | int get_compat_ipc64_perm(struct ipc64_perm *, |
| 271 | struct compat_ipc64_perm __user *); |
| 272 | |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 273 | static inline int compat_ipc_parse_version(int *cmd) |
| 274 | { |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 275 | int version = *cmd & IPC_64; |
| 276 | *cmd &= ~IPC_64; |
| 277 | return version; |
Al Viro | 553f770 | 2017-07-08 22:52:47 -0400 | [diff] [blame] | 278 | } |
| 279 | #endif |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 280 | |
| 281 | /* for __ARCH_WANT_SYS_IPC */ |
| 282 | long ksys_semtimedop(int semid, struct sembuf __user *tsops, |
| 283 | unsigned int nsops, |
Arnd Bergmann | 21fc538 | 2018-04-13 13:58:00 +0200 | [diff] [blame] | 284 | const struct __kernel_timespec __user *timeout); |
Dominik Brodowski | 6989471 | 2018-03-20 19:53:58 +0100 | [diff] [blame] | 285 | long ksys_semget(key_t key, int nsems, int semflg); |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 286 | long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); |
Dominik Brodowski | 3d65661 | 2018-03-20 20:06:04 +0100 | [diff] [blame] | 287 | long ksys_msgget(key_t key, int msgflg); |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 288 | long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); |
Dominik Brodowski | 078faac | 2018-03-20 21:25:57 +0100 | [diff] [blame] | 289 | long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, |
| 290 | long msgtyp, int msgflg); |
Dominik Brodowski | 31c213f | 2018-03-20 21:29:00 +0100 | [diff] [blame] | 291 | long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, |
| 292 | int msgflg); |
Dominik Brodowski | 65749e0 | 2018-03-20 20:07:53 +0100 | [diff] [blame] | 293 | long ksys_shmget(key_t key, size_t size, int shmflg); |
Dominik Brodowski | da1e2744 | 2018-03-20 20:09:48 +0100 | [diff] [blame] | 294 | long ksys_shmdt(char __user *shmaddr); |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 295 | long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 296 | |
| 297 | /* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */ |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 298 | long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, |
| 299 | unsigned int nsops, |
Arnd Bergmann | 9afc5ee | 2018-07-13 12:52:28 +0200 | [diff] [blame] | 300 | const struct old_timespec32 __user *timeout); |
Arnd Bergmann | b0d1757 | 2018-04-13 13:58:23 +0200 | [diff] [blame] | 301 | #ifdef CONFIG_COMPAT |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 302 | long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg); |
| 303 | long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr); |
Dominik Brodowski | 078faac | 2018-03-20 21:25:57 +0100 | [diff] [blame] | 304 | long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, |
| 305 | compat_long_t msgtyp, int msgflg); |
Dominik Brodowski | 31c213f | 2018-03-20 21:29:00 +0100 | [diff] [blame] | 306 | long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp, |
| 307 | compat_ssize_t msgsz, int msgflg); |
Arnd Bergmann | 275f221 | 2018-12-31 22:22:40 +0100 | [diff] [blame] | 308 | long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr); |
Dominik Brodowski | 41f4f0e | 2018-03-20 19:48:14 +0100 | [diff] [blame] | 309 | #endif /* CONFIG_COMPAT */ |
| 310 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 311 | #endif |