// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

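/*
 * Two events can be merged only if they refer to the same object: the inode,
 * reporting pid and file handle type/length must match, and then either the
 * path or the encoded fid must be identical.
 */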
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode != new_fsn->inode || old->pid != new->pid ||
	    old->fh_type != new->fh_type || old->fh_len != new->fh_len)
		return false;

	if (fanotify_event_has_path(old)) {
		return old->path.mnt == new->path.mnt &&
			old->path.dentry == new->path.dentry;
	} else if (fanotify_event_has_fid(old)) {
		return fanotify_fid_equal(&old->fid, &new->fid, old->fh_len);
	}

	/* Do not merge events if we failed to encode fid */
	return false;
}

/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

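/*
 * Block until userspace answers a permission event and translate the answer:
 * FAN_ALLOW becomes 0, FAN_DENY (or anything unrecognized) becomes -EPERM.
 * A FAN_AUDIT flag in the response additionally generates an audit record.
 */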
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	const struct path *path = data;
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	/* If we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return 0;

	/* Sorry, fanotify only gives a damn about files and dirs */
	if (!d_is_reg(path->dentry) &&
	    !d_can_lookup(path->dentry))
		return 0;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * If the event is for a child and this mark doesn't care about
		 * events on a child, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    (type != FSNOTIFY_OBJ_TYPE_INODE ||
		     !(mark->mask & FS_EVENT_ON_CHILD)))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	if (d_is_dir(path->dentry) &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return 0;

	return event_mask & FANOTIFY_OUTGOING_EVENTS & marks_mask &
	       ~marks_ignored_mask;
}

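/*
 * Encode a file handle identifying @inode into the event's fanotify_fid.
 * Handles that fit in FANOTIFY_INLINE_FH_LEN bytes are stored inline;
 * larger ones are kmalloc'ed and hung off fid->ext_fh. Returns the handle
 * type on success or FILEID_INVALID on failure.
 */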
static int fanotify_encode_fid(struct fanotify_event *event,
			       struct inode *inode, gfp_t gfp,
			       __kernel_fsid_t *fsid)
{
	struct fanotify_fid *fid = &event->fid;
	int dwords, bytes = 0;
	int err, type;

	fid->ext_fh = NULL;
	dwords = 0;
	err = -ENOENT;
	type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
	if (!dwords)
		goto out_err;

	bytes = dwords << 2;
	if (bytes > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to allocate event */
		err = -ENOMEM;
		fid->ext_fh = kmalloc(bytes, gfp);
		if (!fid->ext_fh)
			goto out_err;
	}

	type = exportfs_encode_inode_fh(inode, fanotify_fid_fh(fid, bytes),
					&dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || bytes != dwords << 2)
		goto out_err;

	fid->fsid = *fsid;
	event->fh_len = bytes;

	return type;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (fsid=%x.%x, "
			    "type=%d, bytes=%d, err=%i)\n",
			    fsid->val[0], fsid->val[1], type, bytes, err);
	kfree(fid->ext_fh);
	fid->ext_fh = NULL;
	event->fh_len = 0;

	return FILEID_INVALID;
}

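/*
 * Allocate and initialize an event for this group. Permission events are
 * allocated from their own cache so they can carry a userspace response.
 * Groups created with FAN_REPORT_FID get a file identifier encoded into the
 * event, other groups get a reference to the path; the allocation is charged
 * to the memcg of the listening group.
 */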
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
					    struct inode *inode, u32 mask,
					    const struct path *path,
					    __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	memalloc_use_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			goto out;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		goto out;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode);
	event->mask = mask;
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));
	event->fh_len = 0;
	if (path && FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Report the event without a file identifier on encode error */
		event->fh_type = fanotify_encode_fid(event,
					d_inode(path->dentry), gfp, fsid);
	} else if (path) {
		event->fh_type = FILEID_ROOT;
		event->path = *path;
		path_get(&event->path);
	} else {
		event->fh_type = FILEID_INVALID;
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
out:
	memalloc_unuse_memcg();
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		fsid = iter_info->marks[type]->connector->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

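/*
 * Entry point called by fsnotify for every event this group may be
 * interested in: narrow the mask to what the group's marks requested,
 * allocate and queue an event, and for permission events wait for the
 * userspace verdict before letting the operation proceed.
 */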
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const unsigned char *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 12);

	mask = fanotify_group_event_mask(iter_info, mask, data, data_type);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID))
		fsid = fanotify_get_fsid(iter_info);

	event = fanotify_alloc_event(group, inode, mask, data, &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
		fsnotify_destroy_event(group, fsn_event);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

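/* Drop the path or external fh reference and the pid before freeing. */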
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	if (fanotify_event_has_path(event))
		path_put(&event->path);
	else if (fanotify_event_has_ext_fh(event))
		kfree(event->fid.ext_fh);
	put_pid(event->pid);
	if (fanotify_is_perm_event(event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};