blob: d58ce664da8435050aa500326ca1ca07ba159f35 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 */
10
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>
#include <linux/pid_namespace.h>

#include <linux/cn_proc.h>
21
/*
 * Size of a cn_msg followed by a proc_event structure. Since the
 * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
 * add one 4-byte word to the size here, and then start the actual
 * cn_msg structure 4 bytes into the stack buffer. The result is that
 * the immediately following proc_event structure is aligned to 8 bytes.
 */
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
30
31/* See comment above; we test our assumption about sizeof struct cn_msg here. */
32static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
33{
34 BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
35 return (struct cn_msg *)(buffer + 4);
36}
Matt Helsley9f460802005-11-07 00:59:16 -080037
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
/*
 * Stamp @msg with a per-cpu sequence number and the originating CPU,
 * then multicast it to userspace listeners on the CN_IDX_PROC group.
 * Must be callable from atomic context (hence GFP_NOWAIT).
 */
static inline void send_msg(struct cn_msg *msg)
{
	preempt_disable();

	/* Post-decrement so the per-cpu sequence numbering starts at 0. */
	msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
	((struct proc_event *)msg->data)->cpu = smp_processor_id();

	/*
	 * Preemption remains disabled during send to ensure the messages are
	 * ordered according to their sequence numbers.
	 *
	 * If cn_netlink_send() fails, the data is not sent.
	 */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

	preempt_enable();
}
61
62void proc_fork_connector(struct task_struct *task)
63{
64 struct cn_msg *msg;
65 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -050066 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Oleg Nesterov9e8f90d2011-07-28 18:26:32 -070067 struct task_struct *parent;
Matt Helsley9f460802005-11-07 00:59:16 -080068
69 if (atomic_read(&proc_event_num_listeners) < 1)
70 return;
71
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -050072 msg = buffer_to_cn_msg(buffer);
Valentin Ilief3c48ec2012-07-14 13:08:29 +000073 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +020074 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +000075 ev->timestamp_ns = ktime_get_ns();
Matt Helsley9f460802005-11-07 00:59:16 -080076 ev->what = PROC_EVENT_FORK;
Oleg Nesterov9e8f90d2011-07-28 18:26:32 -070077 rcu_read_lock();
78 parent = rcu_dereference(task->real_parent);
79 ev->event_data.fork.parent_pid = parent->pid;
80 ev->event_data.fork.parent_tgid = parent->tgid;
81 rcu_read_unlock();
Matt Helsley9f460802005-11-07 00:59:16 -080082 ev->event_data.fork.child_pid = task->pid;
83 ev->event_data.fork.child_tgid = task->tgid;
84
85 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
86 msg->ack = 0; /* not used */
87 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +020088 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -030089 send_msg(msg);
Matt Helsley9f460802005-11-07 00:59:16 -080090}
91
92void proc_exec_connector(struct task_struct *task)
93{
94 struct cn_msg *msg;
95 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -050096 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Matt Helsley9f460802005-11-07 00:59:16 -080097
98 if (atomic_read(&proc_event_num_listeners) < 1)
99 return;
100
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500101 msg = buffer_to_cn_msg(buffer);
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000102 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200103 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000104 ev->timestamp_ns = ktime_get_ns();
Matt Helsley9f460802005-11-07 00:59:16 -0800105 ev->what = PROC_EVENT_EXEC;
106 ev->event_data.exec.process_pid = task->pid;
107 ev->event_data.exec.process_tgid = task->tgid;
108
109 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
110 msg->ack = 0; /* not used */
111 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200112 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300113 send_msg(msg);
Matt Helsley9f460802005-11-07 00:59:16 -0800114}
115
/*
 * proc_id_connector - report a change of real/effective UID or GID
 * @task:     the task whose credentials changed
 * @which_id: PROC_EVENT_UID or PROC_EVENT_GID; any other value is ignored
 *
 * Credentials are read under RCU and translated into the initial user
 * namespace before being reported.
 */
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		/* Unknown id type: drop the event without sending. */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	ev->timestamp_ns = ktime_get_ns();

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
153
Scott James Remnant02b51df2009-09-22 16:43:44 -0700154void proc_sid_connector(struct task_struct *task)
155{
156 struct cn_msg *msg;
157 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500158 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Scott James Remnant02b51df2009-09-22 16:43:44 -0700159
160 if (atomic_read(&proc_event_num_listeners) < 1)
161 return;
162
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500163 msg = buffer_to_cn_msg(buffer);
Scott James Remnant02b51df2009-09-22 16:43:44 -0700164 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200165 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000166 ev->timestamp_ns = ktime_get_ns();
Scott James Remnant02b51df2009-09-22 16:43:44 -0700167 ev->what = PROC_EVENT_SID;
168 ev->event_data.sid.process_pid = task->pid;
169 ev->event_data.sid.process_tgid = task->tgid;
170
171 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
172 msg->ack = 0; /* not used */
173 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200174 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300175 send_msg(msg);
Scott James Remnant02b51df2009-09-22 16:43:44 -0700176}
177
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300178void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
179{
180 struct cn_msg *msg;
181 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500182 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300183
184 if (atomic_read(&proc_event_num_listeners) < 1)
185 return;
186
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500187 msg = buffer_to_cn_msg(buffer);
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300188 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200189 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000190 ev->timestamp_ns = ktime_get_ns();
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300191 ev->what = PROC_EVENT_PTRACE;
192 ev->event_data.ptrace.process_pid = task->pid;
193 ev->event_data.ptrace.process_tgid = task->tgid;
194 if (ptrace_id == PTRACE_ATTACH) {
195 ev->event_data.ptrace.tracer_pid = current->pid;
196 ev->event_data.ptrace.tracer_tgid = current->tgid;
197 } else if (ptrace_id == PTRACE_DETACH) {
198 ev->event_data.ptrace.tracer_pid = 0;
199 ev->event_data.ptrace.tracer_tgid = 0;
200 } else
201 return;
202
203 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
204 msg->ack = 0; /* not used */
205 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200206 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300207 send_msg(msg);
Vladimir Zapolskiyf701e5b2011-07-15 20:45:18 +0300208}
209
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000210void proc_comm_connector(struct task_struct *task)
211{
212 struct cn_msg *msg;
213 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500214 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000215
216 if (atomic_read(&proc_event_num_listeners) < 1)
217 return;
218
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500219 msg = buffer_to_cn_msg(buffer);
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000220 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200221 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000222 ev->timestamp_ns = ktime_get_ns();
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000223 ev->what = PROC_EVENT_COMM;
224 ev->event_data.comm.process_pid = task->pid;
225 ev->event_data.comm.process_tgid = task->tgid;
226 get_task_comm(ev->event_data.comm.comm, task);
227
228 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
229 msg->ack = 0; /* not used */
230 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200231 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300232 send_msg(msg);
Vladimir Zapolskiyf786ecb2011-09-21 09:26:44 +0000233}
234
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000235void proc_coredump_connector(struct task_struct *task)
236{
237 struct cn_msg *msg;
238 struct proc_event *ev;
Li RongQing6d2b0f02019-03-06 14:46:27 +0800239 struct task_struct *parent;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500240 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000241
242 if (atomic_read(&proc_event_num_listeners) < 1)
243 return;
244
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500245 msg = buffer_to_cn_msg(buffer);
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000246 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200247 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000248 ev->timestamp_ns = ktime_get_ns();
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000249 ev->what = PROC_EVENT_COREDUMP;
250 ev->event_data.coredump.process_pid = task->pid;
251 ev->event_data.coredump.process_tgid = task->tgid;
Li RongQing6d2b0f02019-03-06 14:46:27 +0800252
253 rcu_read_lock();
254 if (pid_alive(task)) {
255 parent = rcu_dereference(task->real_parent);
256 ev->event_data.coredump.parent_pid = parent->pid;
257 ev->event_data.coredump.parent_tgid = parent->tgid;
258 }
259 rcu_read_unlock();
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000260
261 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
262 msg->ack = 0; /* not used */
263 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200264 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300265 send_msg(msg);
Jesper Derehag2b5faa42013-03-19 20:50:05 +0000266}
267
Matt Helsley9f460802005-11-07 00:59:16 -0800268void proc_exit_connector(struct task_struct *task)
269{
270 struct cn_msg *msg;
271 struct proc_event *ev;
Li RongQing6d2b0f02019-03-06 14:46:27 +0800272 struct task_struct *parent;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500273 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Matt Helsley9f460802005-11-07 00:59:16 -0800274
275 if (atomic_read(&proc_event_num_listeners) < 1)
276 return;
277
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500278 msg = buffer_to_cn_msg(buffer);
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000279 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200280 memset(&ev->event_data, 0, sizeof(ev->event_data));
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000281 ev->timestamp_ns = ktime_get_ns();
Matt Helsley9f460802005-11-07 00:59:16 -0800282 ev->what = PROC_EVENT_EXIT;
283 ev->event_data.exit.process_pid = task->pid;
284 ev->event_data.exit.process_tgid = task->tgid;
285 ev->event_data.exit.exit_code = task->exit_code;
286 ev->event_data.exit.exit_signal = task->exit_signal;
Li RongQing6d2b0f02019-03-06 14:46:27 +0800287
288 rcu_read_lock();
289 if (pid_alive(task)) {
290 parent = rcu_dereference(task->real_parent);
291 ev->event_data.exit.parent_pid = parent->pid;
292 ev->event_data.exit.parent_tgid = parent->tgid;
293 }
294 rcu_read_unlock();
Matt Helsley9f460802005-11-07 00:59:16 -0800295
296 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
297 msg->ack = 0; /* not used */
298 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200299 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300300 send_msg(msg);
Matt Helsley9f460802005-11-07 00:59:16 -0800301}
302
303/*
304 * Send an acknowledgement message to userspace
305 *
306 * Use 0 for success, EFOO otherwise.
307 * Note: this is the negative of conventional kernel error
308 * values because it's not being returned via syscall return
309 * mechanisms.
310 */
311static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
312{
313 struct cn_msg *msg;
314 struct proc_event *ev;
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500315 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
Matt Helsley9f460802005-11-07 00:59:16 -0800316
317 if (atomic_read(&proc_event_num_listeners) < 1)
318 return;
319
Chris Metcalf1ca1a4c2013-11-14 12:09:21 -0500320 msg = buffer_to_cn_msg(buffer);
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000321 ev = (struct proc_event *)msg->data;
Mathias Krausee727ca82013-09-30 22:03:06 +0200322 memset(&ev->event_data, 0, sizeof(ev->event_data));
Matt Helsley9f460802005-11-07 00:59:16 -0800323 msg->seq = rcvd_seq;
Thomas Gleixner9e93f212014-07-16 21:04:40 +0000324 ev->timestamp_ns = ktime_get_ns();
Matt Helsley9f460802005-11-07 00:59:16 -0800325 ev->cpu = -1;
326 ev->what = PROC_EVENT_NONE;
327 ev->event_data.ack.err = err;
328 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
329 msg->ack = rcvd_ack + 1;
330 msg->len = sizeof(*ev);
Mathias Krausee727ca82013-09-30 22:03:06 +0200331 msg->flags = 0; /* not used */
Aaron Campbellab8ed952016-06-24 10:05:32 -0300332 send_msg(msg);
Matt Helsley9f460802005-11-07 00:59:16 -0800333}
334
335/**
336 * cn_proc_mcast_ctl
337 * @data: message sent from userspace via the connector
338 */
Stephen Boydf0b25932009-10-06 01:39:51 -0700339static void cn_proc_mcast_ctl(struct cn_msg *msg,
340 struct netlink_skb_parms *nsp)
Matt Helsley9f460802005-11-07 00:59:16 -0800341{
Matt Helsley9f460802005-11-07 00:59:16 -0800342 enum proc_cn_mcast_op *mc_op = NULL;
343 int err = 0;
344
345 if (msg->len != sizeof(*mc_op))
346 return;
347
Eric W. Biederman9582d902012-02-07 16:48:16 -0800348 /*
349 * Events are reported with respect to the initial pid
350 * and user namespaces so ignore requestors from
351 * other namespaces.
352 */
353 if ((current_user_ns() != &init_user_ns) ||
354 (task_active_pid_ns(current) != &init_pid_ns))
355 return;
356
Kees Cooke70ab972013-02-25 21:32:25 +0000357 /* Can only change if privileged. */
Eric W. Biederman90f62cf2014-04-23 14:29:27 -0700358 if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
Kees Cooke70ab972013-02-25 21:32:25 +0000359 err = EPERM;
360 goto out;
361 }
362
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000363 mc_op = (enum proc_cn_mcast_op *)msg->data;
Matt Helsley9f460802005-11-07 00:59:16 -0800364 switch (*mc_op) {
365 case PROC_CN_MCAST_LISTEN:
366 atomic_inc(&proc_event_num_listeners);
367 break;
368 case PROC_CN_MCAST_IGNORE:
369 atomic_dec(&proc_event_num_listeners);
370 break;
371 default:
372 err = EINVAL;
373 break;
374 }
Kees Cooke70ab972013-02-25 21:32:25 +0000375
376out:
Matt Helsley9f460802005-11-07 00:59:16 -0800377 cn_proc_ack(err, msg->seq, msg->ack);
378}
379
380/*
381 * cn_proc_init - initialization entry point
382 *
383 * Adds the connector callback to the connector driver.
384 */
385static int __init cn_proc_init(void)
386{
Valentin Ilief3c48ec2012-07-14 13:08:29 +0000387 int err = cn_add_callback(&cn_proc_event_id,
388 "cn_proc",
389 &cn_proc_mcast_ctl);
390 if (err) {
391 pr_warn("cn_proc failed to register\n");
Matt Helsley9f460802005-11-07 00:59:16 -0800392 return err;
393 }
394 return 0;
395}
Paul Gortmaker8297f2d2016-07-04 17:50:58 -0400396device_initcall(cn_proc_init);