// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>

#include "fanotify.h"

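/* Two events can be merged if they report the same inode, pid and path. */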
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event_info *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode == new_fsn->inode && old->pid == new->pid &&
	    old->path.mnt == new->path.mnt &&
	    old->path.dentry == new->path.dentry)
		return true;
	return false;
}

/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(event->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			test_event->mask |= event->mask;
			return 1;
		}
	}

	return 0;
}

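/*
 * Wait for userspace to respond to a permission event and turn the response
 * into an error code for the blocked operation: 0 for FAN_ALLOW, -EPERM for
 * FAN_DENY or anything unrecognized. If FAN_AUDIT was set in the response,
 * the decision is also reported to the audit subsystem.
 */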
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	const struct path *path = data;
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	/* If we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return 0;

	/* Sorry, fanotify only gives a damn about files and dirs */
	if (!d_is_reg(path->dentry) &&
	    !d_can_lookup(path->dentry))
		return 0;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * If the event is for a child and this mark doesn't care about
		 * events on a child, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    (type != FSNOTIFY_OBJ_TYPE_INODE ||
		     !(mark->mask & FS_EVENT_ON_CHILD)))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	if (d_is_dir(path->dentry) &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return 0;

	return event_mask & FANOTIFY_OUTGOING_EVENTS & marks_mask &
		~marks_ignored_mask;
}

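/*
 * Allocate and initialize an event. The allocation is charged to the memcg
 * of the listening group, permission events come from their own slab cache,
 * and groups with an unlimited queue allocate with __GFP_NOFAIL so that no
 * event is lost under memory pressure.
 */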
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
						 struct inode *inode, u32 mask,
						 const struct path *path)
{
	struct fanotify_event_info *event = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	memalloc_use_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			goto out;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		goto out;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
out:
	memalloc_unuse_memcg();
	return event;
}

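/*
 * Main event handler: narrow the mask to what this group's marks asked for,
 * allocate and queue the event, and for permission events wait until
 * userspace delivers a response before letting the operation proceed.
 */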
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const unsigned char *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 11);

	mask = fanotify_group_event_mask(iter_info, mask, data, data_type);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
		fsnotify_destroy_event(group, fsn_event);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

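/* Drop the per-user listener accounting taken when the group was created. */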
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event_info *event;

	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->pid);
	if (fanotify_is_perm_event(fsn_event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};