/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

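/*
 * Allocate a reply skb of @size bytes and start a generic netlink message
 * for command @cmd in it.  When @info is NULL the message is being built
 * for an unsolicited exit-time notification and gets a per-cpu sequence
 * number; otherwise it is built as a reply to the request in @info.  On
 * success the skb is handed back through @skbp.
 */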
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Finalize the taskstats data in @skb and send it as a reply to the
 * request described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to every listener on @listeners, the
 * per-cpu list of tasks registered for this cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

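/*
 * Fill @stats with the accounting data of a single task @tsk by calling
 * into the delay-accounting, basic-acct and extended-acct subsystems.
 */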
static void fill_stats(struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

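/*
 * Look up the task with virtual pid @pid, pin it with a reference and
 * fill @stats for that single task.  Returns -ESRCH if no such task
 * exists.
 */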
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(tsk, stats);
	put_task_struct(tsk);
	return 0;
}

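/*
 * Aggregate stats for the whole thread group @tgid: start from the
 * per-tgid totals already accumulated in signal->stats for exited
 * members (if any) and add the per-task stats of each live thread.
 */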
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

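/*
 * Fold the per-task stats of the exiting @tsk into its thread group's
 * shared signal->stats so they remain available after the task is gone.
 */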
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

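/*
 * Register (@isadd == REGISTER) or deregister the listener with netlink
 * port id @pid on the per-cpu listener list of every cpu in @mask.  If an
 * allocation fails part-way through registration, the entries added so
 * far are removed again via the deregistration path.
 */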
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

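/*
 * Parse the cpulist string carried in attribute @na into @mask.  Returns
 * 1 when the attribute is absent, 0 on success and a negative errno on
 * failure.
 */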
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

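/*
 * Start the nested PID/TGID aggregate in @skb (adding a padding attribute
 * first where required) and reserve room for the taskstats payload.
 * Returns a pointer to the reserved taskstats area the caller must fill
 * in, or NULL if the skb ran out of space.
 */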
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

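/*
 * Handler for CGROUPSTATS_CMD_GET: resolve the cgroup behind the file
 * descriptor in CGROUPSTATS_CMD_ATTR_FD, build a cgroupstats payload for
 * it and send it back as a CGROUPSTATS_CMD_NEW reply.
 */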
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fput_light(file, fput_needed);
	return rc;
}

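/*
 * Register the sender of @info as an exit-data listener on the cpus named
 * in the TASKSTATS_CMD_ATTR_REGISTER_CPUMASK attribute.
 */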
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

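/*
 * Remove the sender of @info from the listener lists of the cpus named in
 * the TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK attribute.
 */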
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

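/*
 * Space needed for one PID (or TGID) + stats aggregate in a reply,
 * including the nesting attribute and, on architectures that need aligned
 * access, the extra padding attribute.
 */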
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

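/*
 * Handle a TASKSTATS_CMD_ATTR_PID query: build a TASKSTATS_CMD_NEW reply
 * carrying the stats of the single task named by the attribute.
 */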
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

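/*
 * Handle a TASKSTATS_CMD_ATTR_TGID query: build a TASKSTATS_CMD_NEW reply
 * carrying the aggregated stats of the named thread group.
 */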
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

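/*
 * Handler for TASKSTATS_CMD_GET: dispatch on whichever command attribute
 * userspace supplied: register or deregister a cpumask listener, or query
 * a single pid or a whole thread group.
 */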
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

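/*
 * Lazily allocate the shared signal->stats structure for @tsk's thread
 * group; a race between two allocators is resolved under the sighand
 * lock and the losing allocation is freed again.
 */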
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	fill_stats(tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);