// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
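/*
 * Bits of the value written to /proc/sys/vm/drop_caches (see
 * Documentation/admin-guide/sysctl/vm.rst), as consumed by the
 * handler below:
 *   1 - drop clean pagecache pages
 *   2 - drop reclaimable slab objects (dentries, inodes, ...)
 *   4 - only suppress the informational log message
 */
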
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in an unusual state. We may also skip
		 * inodes without pages, but we deliberately won't when we
		 * need to reschedule, to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
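		/*
		 * Pin the inode so it stays valid (and stays on the
		 * s_inodes list) while the locks are dropped below; the
		 * previously pinned inode is released only once we are
		 * outside the spinlocks, since iput() may sleep.
		 */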
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}
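
/*
 * Handler for the vm.drop_caches sysctl (/proc/sys/vm/drop_caches).
 * Only the write path does any work; a read simply reports the
 * stored value.
 */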
int drop_caches_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		static int stfu;

		if (sysctl_drop_caches & 1) {
			iterate_supers(drop_pagecache_sb, NULL);
			count_vm_event(DROP_PAGECACHE);
		}
		if (sysctl_drop_caches & 2) {
			drop_slab();
			count_vm_event(DROP_SLAB);
		}
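		/*
		 * A write that includes bit 4 silences this message for all
		 * subsequent writes; the current write is still logged if
		 * logging was not already disabled.
		 */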
		if (!stfu) {
			pr_info("%s (%d): drop_caches: %d\n",
				current->comm, task_pid_nr(current),
				sysctl_drop_caches);
		}
		stfu |= sysctl_drop_caches & 4;
	}
	return 0;
}
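
/*
 * Illustrative userspace usage (a sketch, not part of this file).  Note
 * that dirty pages are not dropped, so data is typically synced first:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sync();		// write back dirty data first
 *		int fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "3", 1);	// bits 1|2: pagecache + slab
 *		close(fd);
 *		return 0;
 *	}
 */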