blob: e619c31b6bd92a4f570127b91a37253126ca9e3a [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Andrew Morton9d0243b2006-01-08 01:00:39 -08002/*
3 * Implement the manual drop-all-pagecache function
4 */
5
Johannes Weiner16e2df22021-09-02 14:53:21 -07006#include <linux/pagemap.h>
Andrew Morton9d0243b2006-01-08 01:00:39 -08007#include <linux/kernel.h>
8#include <linux/mm.h>
9#include <linux/fs.h>
10#include <linux/writeback.h>
11#include <linux/sysctl.h>
12#include <linux/gfp.h>
Dave Chinner55fa6092011-03-22 22:23:40 +110013#include "internal.h"
Andrew Morton9d0243b2006-01-08 01:00:39 -080014
15/* A global variable is a bit ugly, but it keeps the code simple */
16int sysctl_drop_caches;
17
/*
 * Drop clean, unused pagecache pages for every inode on @sb.
 *
 * Called for each superblock via iterate_supers() when bit 0 of
 * /proc/sys/vm/drop_caches is written.  @unused is the opaque argument
 * iterate_supers() passes through; it is ignored here.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/*
		 * Pin the inode before dropping the list lock so it cannot
		 * be freed and our list cursor stays valid while we work
		 * outside s_inode_list_lock.
		 */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/*
		 * Defer the iput of the previous inode to here, where no
		 * spinlocks are held: the previous inode kept the list
		 * position stable for the iteration above, and iput() may
		 * sleep so it must not run under s_inode_list_lock.
		 */
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	/* Release the last pinned inode once iteration is complete. */
	iput(toput_inode);
}
49
Joe Perches1f7e0612014-06-06 14:38:05 -070050int drop_caches_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +020051 void *buffer, size_t *length, loff_t *ppos)
Andrew Morton9d0243b2006-01-08 01:00:39 -080052{
Petr Holasekcb16e952011-03-23 16:43:09 -070053 int ret;
54
55 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
56 if (ret)
57 return ret;
Andrew Morton9d0243b2006-01-08 01:00:39 -080058 if (write) {
Dave Hansen5509a5d2014-04-03 14:48:19 -070059 static int stfu;
60
61 if (sysctl_drop_caches & 1) {
Al Viro01a05b32010-03-23 06:06:58 -040062 iterate_supers(drop_pagecache_sb, NULL);
Dave Hansen5509a5d2014-04-03 14:48:19 -070063 count_vm_event(DROP_PAGECACHE);
64 }
65 if (sysctl_drop_caches & 2) {
Andrew Morton9d0243b2006-01-08 01:00:39 -080066 drop_slab();
Dave Hansen5509a5d2014-04-03 14:48:19 -070067 count_vm_event(DROP_SLAB);
68 }
69 if (!stfu) {
70 pr_info("%s (%d): drop_caches: %d\n",
71 current->comm, task_pid_nr(current),
72 sysctl_drop_caches);
73 }
74 stfu |= sysctl_drop_caches & 4;
Andrew Morton9d0243b2006-01-08 01:00:39 -080075 }
76 return 0;
77}