/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
#include <linux/sched/cputime.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN        (100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family;

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
        [TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
        [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};

/*
 * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
 * Make sure they are always aligned.
 */
static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
        [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
        struct list_head list;
        pid_t pid;
        char valid;
};

struct listener_list {
        struct rw_semaphore sem;
        struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
        REGISTER,
        DEREGISTER,
        CPU_DONT_CARE
};

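/*
 * Allocate a new genetlink message and start a reply for command @cmd.
 * When @info is NULL the message is built for the per-cpu exit
 * notification path and gets its own sequence number; otherwise it is
 * prepared as a direct reply to the request described by @info.
 */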
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
                                size_t size)
{
        struct sk_buff *skb;
        void *reply;

        /*
         * If new attributes are added, please revisit this allocation
         */
        skb = genlmsg_new(size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        if (!info) {
                int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

                reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
        } else
                reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
        if (reply == NULL) {
                nlmsg_free(skb);
                return -EINVAL;
        }

        *skbp = skb;
        return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);

        genlmsg_end(skb, reply);

        return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
                                        struct listener_list *listeners)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;

        genlmsg_end(skb, reply);

        rc = 0;
        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                skb_next = NULL;
                if (!list_is_last(&s->list, &listeners->list)) {
                        skb_next = skb_clone(skb_cur, GFP_KERNEL);
                        if (!skb_next)
                                break;
                }
                rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
                if (rc == -ECONNREFUSED) {
                        s->valid = 0;
                        delcount++;
                }
                skb_cur = skb_next;
        }
        up_read(&listeners->sem);

        if (skb_cur)
                nlmsg_free(skb_cur);

        if (!delcount)
                return;

        /* Delete invalidated entries */
        down_write(&listeners->sem);
        list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                if (!s->valid) {
                        list_del(&s->list);
                        kfree(s);
                }
        }
        up_write(&listeners->sem);
}

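/*
 * Fill a taskstats structure for a single task: zero it, then let the
 * delay accounting, basic accounting and extended accounting code add
 * their fields.
 */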
static void fill_stats(struct user_namespace *user_ns,
                       struct pid_namespace *pid_ns,
                       struct task_struct *tsk, struct taskstats *stats)
{
        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstats as follows
         *
         *      per-task-foo(stats, tsk);
         */

        delayacct_add_tsk(stats, tsk);

        /* fill in basic acct fields */
        stats->version = TASKSTATS_VERSION;
        stats->nvcsw = tsk->nvcsw;
        stats->nivcsw = tsk->nivcsw;
        bacct_add_tsk(user_ns, pid_ns, stats, tsk);

        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);
}

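/*
 * Look up a live task by pid (in the caller's pid namespace) and fill
 * @stats for it. Returns -ESRCH if no such task exists.
 */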
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
        struct task_struct *tsk;

        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk)
                get_task_struct(tsk);
        rcu_read_unlock();
        if (!tsk)
                return -ESRCH;
        fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
        put_task_struct(tsk);
        return 0;
}

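/*
 * Fill aggregated stats for a thread group: start from the stats already
 * accumulated for exited threads in first->signal->stats (if any), then
 * add the contribution of each live thread while holding the group's
 * sighand lock.
 */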
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
        struct task_struct *tsk, *first;
        unsigned long flags;
        int rc = -ESRCH;
        u64 delta, utime, stime;
        u64 start_time;

        /*
         * Add additional stats from live tasks except zombie thread group
         * leaders who are already counted with the dead tasks
         */
        rcu_read_lock();
        first = find_task_by_vpid(tgid);

        if (!first || !lock_task_sighand(first, &flags))
                goto out;

        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
        else
                memset(stats, 0, sizeof(*stats));

        tsk = first;
        start_time = ktime_get_ns();
        do {
                if (tsk->exit_state)
                        continue;
                /*
                 * Accounting subsystems can call their functions here to
                 * fill in relevant parts of struct taskstats as follows
                 *
                 *      per-task-foo(stats, tsk);
                 */
                delayacct_add_tsk(stats, tsk);

                /* calculate task elapsed time in nsec */
                delta = start_time - tsk->start_time;
                /* Convert to micro seconds */
                do_div(delta, NSEC_PER_USEC);
                stats->ac_etime += delta;

                task_cputime(tsk, &utime, &stime);
                stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
                stats->ac_stime += div_u64(stime, NSEC_PER_USEC);

                stats->nvcsw += tsk->nvcsw;
                stats->nivcsw += tsk->nivcsw;
        } while_each_thread(first, tsk);

        unlock_task_sighand(first, &flags);
        rc = 0;
out:
        rcu_read_unlock();

        stats->version = TASKSTATS_VERSION;
        /*
         * Accounting subsystems can also add calls here to modify
         * fields of taskstats.
         */
        return rc;
}

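/*
 * Fold the exiting task's per-task delay accounting into the thread
 * group's accumulated taskstats under the siglock. A no-op if no group
 * stats structure has been allocated.
 */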
static void fill_tgid_exit(struct task_struct *tsk)
{
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        if (!tsk->signal->stats)
                goto ret;

        /*
         * Each accounting subsystem calls its functions here to
         * accumulate its per-task stats for tsk into the per-tgid structure
         *
         *      per-task-foo(tsk->signal->stats, tsk);
         */
        delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
        return;
}

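/*
 * Register or deregister the netlink port @pid as an exit-data listener
 * on every CPU in @mask. Only allowed from the initial user and pid
 * namespaces; a duplicate registration for the same port is ignored.
 */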
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
        struct listener_list *listeners;
        struct listener *s, *tmp, *s2;
        unsigned int cpu;
        int ret = 0;

        if (!cpumask_subset(mask, cpu_possible_mask))
                return -EINVAL;

        if (current_user_ns() != &init_user_ns)
                return -EINVAL;

        if (task_active_pid_ns(current) != &init_pid_ns)
                return -EINVAL;

        if (isadd == REGISTER) {
                for_each_cpu(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener),
                                        GFP_KERNEL, cpu_to_node(cpu));
                        if (!s) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }
                        s->pid = pid;
                        s->valid = 1;

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        list_for_each_entry(s2, &listeners->list, list) {
                                if (s2->pid == pid && s2->valid)
                                        goto exists;
                        }
                        list_add(&s->list, &listeners->list);
                        s = NULL;
exists:
                        up_write(&listeners->sem);
                        kfree(s); /* nop if NULL */
                }
                return 0;
        }

        /* Deregister or cleanup */
cleanup:
        for_each_cpu(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                        if (s->pid == pid) {
                                list_del(&s->list);
                                kfree(s);
                                break;
                        }
                }
                up_write(&listeners->sem);
        }
        return ret;
}

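/*
 * Parse a cpulist string attribute (e.g. "0-3,7") into @mask.
 * Returns 1 if the attribute is absent, 0 on success, or a negative
 * error code.
 */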
static int parse(struct nlattr *na, struct cpumask *mask)
{
        char *data;
        int len;
        int ret;

        if (na == NULL)
                return 1;
        len = nla_len(na);
        if (len > TASKSTATS_CPUMASK_MAXLEN)
                return -E2BIG;
        if (len < 1)
                return -EINVAL;
        data = kmalloc(len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        nla_strlcpy(data, na, len);
        ret = cpulist_parse(data, mask);
        kfree(data);
        return ret;
}

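/*
 * Start a nested PID/TGID aggregate in @skb, store the id and reserve
 * room for a struct taskstats. Returns a pointer to the reserved stats
 * area, or NULL if the skb ran out of room.
 */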
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
        struct nlattr *na, *ret;
        int aggr;

        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;

        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;

        if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
                nla_nest_cancel(skb, na);
                goto err;
        }
        ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS,
                                sizeof(struct taskstats), TASKSTATS_TYPE_NULL);
        if (!ret) {
                nla_nest_cancel(skb, na);
                goto err;
        }
        nla_nest_end(skb, na);

        return nla_data(ret);
err:
        return NULL;
}

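/*
 * CGROUPSTATS_CMD_GET handler: build cgroupstats for the cgroup
 * directory referenced by the CGROUPSTATS_CMD_ATTR_FD file descriptor
 * and send them back to the requester.
 */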
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        int rc = 0;
        struct sk_buff *rep_skb;
        struct cgroupstats *stats;
        struct nlattr *na;
        size_t size;
        u32 fd;
        struct fd f;

        na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
        if (!na)
                return -EINVAL;

        fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
        f = fdget(fd);
        if (!f.file)
                return 0;

        size = nla_total_size(sizeof(struct cgroupstats));

        rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
                                size);
        if (rc < 0)
                goto err;

        na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
                                sizeof(struct cgroupstats));
        if (na == NULL) {
                nlmsg_free(rep_skb);
                rc = -EMSGSIZE;
                goto err;
        }

        stats = nla_data(na);
        memset(stats, 0, sizeof(*stats));

        rc = cgroupstats_build(stats, f.file->f_path.dentry);
        if (rc < 0) {
                nlmsg_free(rep_skb);
                goto err;
        }

        rc = send_reply(rep_skb, info);

err:
        fdput(f);
        return rc;
}

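/*
 * Handlers for the TASKSTATS_CMD_ATTR_REGISTER_CPUMASK and
 * TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK attributes: parse the cpumask
 * and (de)register the sender as a listener on those CPUs.
 */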
static int cmd_attr_register_cpumask(struct genl_info *info)
{
        cpumask_var_t mask;
        int rc;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
        rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
        free_cpumask_var(mask);
        return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
        cpumask_var_t mask;
        int rc;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
        if (rc < 0)
                goto out;
        rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
        free_cpumask_var(mask);
        return rc;
}

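/*
 * Space needed for one id + taskstats pair, including the nesting
 * attribute header.
 */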
static size_t taskstats_packet_size(void)
{
        size_t size;

        size = nla_total_size(sizeof(u32)) +
                nla_total_size_64bit(sizeof(struct taskstats)) +
                nla_total_size(0);

        return size;
}

static int cmd_attr_pid(struct genl_info *info)
{
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        u32 pid;
        int rc;

        size = taskstats_packet_size();

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
        if (!stats)
                goto err;

        rc = fill_stats_for_pid(pid, stats);
        if (rc < 0)
                goto err;
        return send_reply(rep_skb, info);
err:
        nlmsg_free(rep_skb);
        return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        u32 tgid;
        int rc;

        size = taskstats_packet_size();

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
        if (!stats)
                goto err;

        rc = fill_stats_for_tgid(tgid, stats);
        if (rc < 0)
                goto err;
        return send_reply(rep_skb, info);
err:
        nlmsg_free(rep_skb);
        return rc;
}

static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
                return cmd_attr_register_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
                return cmd_attr_deregister_cpumask(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
                return cmd_attr_pid(info);
        else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
                return cmd_attr_tgid(info);
        else
                return -EINVAL;
}

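/*
 * Lazily allocate the per-thread-group taskstats accumulator the first
 * time a multithreaded task needs it; a racing allocation is resolved
 * under the siglock and the loser's copy is freed.
 */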
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct taskstats *stats;

        if (sig->stats || thread_group_empty(tsk))
                goto ret;

        /* No problem if kmem_cache_zalloc() fails */
        stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

        spin_lock_irq(&tsk->sighand->siglock);
        if (!sig->stats) {
                sig->stats = stats;
                stats = NULL;
        }
        spin_unlock_irq(&tsk->sighand->siglock);

        if (stats)
                kmem_cache_free(taskstats_cache, stats);
ret:
        return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
        int rc;
        struct listener_list *listeners;
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        int is_thread_group;

        if (!family_registered)
                return;

        /*
         * Size includes space for nested attributes
         */
        size = taskstats_packet_size();

        is_thread_group = !!taskstats_tgid_alloc(tsk);
        if (is_thread_group) {
                /* PID + STATS + TGID + STATS */
                size = 2 * size;
                /* fill the tsk->signal->stats structure */
                fill_tgid_exit(tsk);
        }

        listeners = raw_cpu_ptr(&listener_array);
        if (list_empty(&listeners->list))
                return;

        rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
                         task_pid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;

        fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
        if (!is_thread_group || !group_dead)
                goto send;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
                         task_tgid_nr_ns(tsk, &init_pid_ns));
        if (!stats)
                goto err;

        memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
        send_cpu_listeners(rep_skb, listeners);
        return;
err:
        nlmsg_free(rep_skb);
}

static const struct genl_ops taskstats_ops[] = {
        {
                .cmd            = TASKSTATS_CMD_GET,
                .doit           = taskstats_user_cmd,
                .policy         = taskstats_cmd_get_policy,
                .flags          = GENL_ADMIN_PERM,
        },
        {
                .cmd            = CGROUPSTATS_CMD_GET,
                .doit           = cgroupstats_user_cmd,
                .policy         = cgroupstats_cmd_get_policy,
        },
};

static struct genl_family family __ro_after_init = {
        .name           = TASKSTATS_GENL_NAME,
        .version        = TASKSTATS_GENL_VERSION,
        .maxattr        = TASKSTATS_CMD_ATTR_MAX,
        .module         = THIS_MODULE,
        .ops            = taskstats_ops,
        .n_ops          = ARRAY_SIZE(taskstats_ops),
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
        unsigned int i;

        taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
        for_each_possible_cpu(i) {
                INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
                init_rwsem(&(per_cpu(listener_array, i).sem));
        }
}

static int __init taskstats_init(void)
{
        int rc;

        rc = genl_register_family(&family);
        if (rc)
                return rc;

        family_registered = 1;
        pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
        return 0;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);