// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
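
/*
 * Example: a 64KiB VM_MAYSHARE region mapped by two processes shows up
 * in the "shared" sum of each of them, while a private 4KiB anonymous
 * mapping is counted once, in the "non-shared" sum of its owner
 * (illustrative sizes).
 */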
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && refcount_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		   "Mem:\t%8lu bytes\n"
		   "Slack:\t%8lu bytes\n"
		   "Shared:\t%8lu bytes\n",
		   bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}
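
/*
 * Illustrative rendering of the seq_printf() above as it would appear
 * in /proc/<pid>/status (values made up for the example):
 *
 *	Mem:	  491520 bytes
 *	Slack:	    4096 bytes
 *	Shared:	  131072 bytes
 */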

unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}
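
/*
 * Note: with !CONFIG_MMU there is no demand paging, so every mapping
 * is fully backed by real memory; the "virtual" size summed here is
 * therefore a real-memory figure too.
 */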

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}
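
/*
 * These figures feed /proc/<pid>/statm, which reports its fields in
 * pages ("size resident shared text lib data dt" per proc(5)); here
 * size and resident come out identical, since everything is in core.
 */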

static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack". It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_puts(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	if (down_read_killable(&mm->mmap_sem)) {
		mmput(mm);
		return ERR_PTR(-EINTR);
	}

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}
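
/*
 * On success m_start() returns with mmap_sem held for read and the mm
 * pinned; both are released in m_stop() once the iteration is done.
 */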

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
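
/*
 * The seq_file core drives these hooks as start, then show/next for
 * each record, then stop; a subsequent read() replays the sequence
 * from the saved position.
 */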

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}
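
/*
 * proc_mem_open() hands back an mm pinned only by its mm_count (the
 * struct stays allocated, its memory does not), which is why m_start()
 * must mmget_not_zero() before touching it and why the matching
 * teardown here is mmdrop(), not mmput().
 */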

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};
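
/*
 * This file_operations struct is what fs/proc/base.c wires up for the
 * per-process "maps" entry on nommu kernels; the MMU variant lives in
 * task_mmu.c.
 */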