  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * cn_proc.c - process events connector
  4. *
  5. * Copyright (C) Matt Helsley, IBM Corp. 2005
  6. * Based on cn_fork.c by Guillaume Thouvenin <[email protected]>
  7. * Original copyright notice follows:
  8. * Copyright (C) 2005 BULL SA.
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/ktime.h>
  12. #include <linux/init.h>
  13. #include <linux/connector.h>
  14. #include <linux/gfp.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/atomic.h>
  17. #include <linux/pid_namespace.h>
  18. #include <linux/cn_proc.h>
  19. #include <linux/local_lock.h>
/*
 * Size of a cn_msg followed by a proc_event structure.  Since the
 * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
 * add one 4-byte word to the size here, and then start the actual
 * cn_msg structure 4 bytes into the stack buffer.  The result is that
 * the immediately following proc_event structure is aligned to 8 bytes.
 */
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)

/* See comment above; we test our assumption about sizeof struct cn_msg here. */
static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
{
	/* Compile-time check of the 20-byte header size the +4 trick relies on. */
	BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
	/* Skip 4 bytes so the proc_event payload after the header is 8-aligned. */
	return (struct cn_msg *)(buffer + 4);
}
/*
 * Count of PROC_CN_MCAST_LISTEN subscribers; every event builder bails
 * out early while this is < 1 so idle systems pay almost nothing.
 */
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
/* Connector callback id this module registers and stamps on messages. */
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* local_event.count is used as the sequence number of the netlink message */
struct local_event {
	local_lock_t lock;	/* serializes count increment + send on this CPU */
	__u32 count;		/* per-CPU netlink message sequence number */
};
static DEFINE_PER_CPU(struct local_event, local_event) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
/*
 * send_msg - stamp @msg with a per-CPU sequence number and the sending
 * CPU, then broadcast it on the CN_IDX_PROC connector group.
 *
 * Best effort: with GFP_NOWAIT, a failed cn_netlink_send() simply drops
 * the event (see comment below).
 */
static inline void send_msg(struct cn_msg *msg)
{
	local_lock(&local_event.lock);

	/* Post-increment semantics: the first message on a CPU gets seq 0. */
	msg->seq = __this_cpu_inc_return(local_event.count) - 1;
	((struct proc_event *)msg->data)->cpu = smp_processor_id();

	/*
	 * local_lock() disables preemption during send to ensure the messages
	 * are ordered according to their sequence numbers.
	 *
	 * If cn_netlink_send() fails, the data is not sent.
	 */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

	local_unlock(&local_event.lock);
}
/*
 * proc_fork_connector - report a fork (PROC_EVENT_FORK) to listeners.
 * @task: the newly created child task.
 *
 * Builds the event in an on-stack buffer (parent and child pid/tgid
 * pairs) and broadcasts it via send_msg().  Does nothing if nobody is
 * subscribed.
 */
void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_FORK;

	/* real_parent is RCU-protected; sample pid/tgid under the read lock. */
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	/* msg->seq and ev->cpu are filled in by send_msg(). */
	send_msg(msg);
}
  84. void proc_exec_connector(struct task_struct *task)
  85. {
  86. struct cn_msg *msg;
  87. struct proc_event *ev;
  88. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  89. if (atomic_read(&proc_event_num_listeners) < 1)
  90. return;
  91. msg = buffer_to_cn_msg(buffer);
  92. ev = (struct proc_event *)msg->data;
  93. memset(&ev->event_data, 0, sizeof(ev->event_data));
  94. ev->timestamp_ns = ktime_get_ns();
  95. ev->what = PROC_EVENT_EXEC;
  96. ev->event_data.exec.process_pid = task->pid;
  97. ev->event_data.exec.process_tgid = task->tgid;
  98. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  99. msg->ack = 0; /* not used */
  100. msg->len = sizeof(*ev);
  101. msg->flags = 0; /* not used */
  102. send_msg(msg);
  103. }
/*
 * proc_id_connector - report a credential change to listeners.
 * @task:     the task whose uid/gid changed.
 * @which_id: PROC_EVENT_UID or PROC_EVENT_GID; any other value is
 *            silently ignored (no message is sent).
 *
 * Ids are translated relative to &init_user_ns, matching the policy in
 * cn_proc_mcast_ctl() that only init-namespace listeners are accepted.
 */
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;

	/* Credentials must be read under RCU. */
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		/* Unknown id kind: drop the event, don't leak the lock. */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	ev->timestamp_ns = ktime_get_ns();
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
  138. void proc_sid_connector(struct task_struct *task)
  139. {
  140. struct cn_msg *msg;
  141. struct proc_event *ev;
  142. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  143. if (atomic_read(&proc_event_num_listeners) < 1)
  144. return;
  145. msg = buffer_to_cn_msg(buffer);
  146. ev = (struct proc_event *)msg->data;
  147. memset(&ev->event_data, 0, sizeof(ev->event_data));
  148. ev->timestamp_ns = ktime_get_ns();
  149. ev->what = PROC_EVENT_SID;
  150. ev->event_data.sid.process_pid = task->pid;
  151. ev->event_data.sid.process_tgid = task->tgid;
  152. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  153. msg->ack = 0; /* not used */
  154. msg->len = sizeof(*ev);
  155. msg->flags = 0; /* not used */
  156. send_msg(msg);
  157. }
  158. void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
  159. {
  160. struct cn_msg *msg;
  161. struct proc_event *ev;
  162. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  163. if (atomic_read(&proc_event_num_listeners) < 1)
  164. return;
  165. msg = buffer_to_cn_msg(buffer);
  166. ev = (struct proc_event *)msg->data;
  167. memset(&ev->event_data, 0, sizeof(ev->event_data));
  168. ev->timestamp_ns = ktime_get_ns();
  169. ev->what = PROC_EVENT_PTRACE;
  170. ev->event_data.ptrace.process_pid = task->pid;
  171. ev->event_data.ptrace.process_tgid = task->tgid;
  172. if (ptrace_id == PTRACE_ATTACH) {
  173. ev->event_data.ptrace.tracer_pid = current->pid;
  174. ev->event_data.ptrace.tracer_tgid = current->tgid;
  175. } else if (ptrace_id == PTRACE_DETACH) {
  176. ev->event_data.ptrace.tracer_pid = 0;
  177. ev->event_data.ptrace.tracer_tgid = 0;
  178. } else
  179. return;
  180. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  181. msg->ack = 0; /* not used */
  182. msg->len = sizeof(*ev);
  183. msg->flags = 0; /* not used */
  184. send_msg(msg);
  185. }
  186. void proc_comm_connector(struct task_struct *task)
  187. {
  188. struct cn_msg *msg;
  189. struct proc_event *ev;
  190. __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
  191. if (atomic_read(&proc_event_num_listeners) < 1)
  192. return;
  193. msg = buffer_to_cn_msg(buffer);
  194. ev = (struct proc_event *)msg->data;
  195. memset(&ev->event_data, 0, sizeof(ev->event_data));
  196. ev->timestamp_ns = ktime_get_ns();
  197. ev->what = PROC_EVENT_COMM;
  198. ev->event_data.comm.process_pid = task->pid;
  199. ev->event_data.comm.process_tgid = task->tgid;
  200. get_task_comm(ev->event_data.comm.comm, task);
  201. memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
  202. msg->ack = 0; /* not used */
  203. msg->len = sizeof(*ev);
  204. msg->flags = 0; /* not used */
  205. send_msg(msg);
  206. }
/*
 * proc_coredump_connector - report a core dump (PROC_EVENT_COREDUMP).
 * @task: the task that is dumping core.
 *
 * If the task is no longer pid-alive, the parent fields are left as the
 * zeroes written by the memset below.
 */
void proc_coredump_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct task_struct *parent;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_COREDUMP;
	ev->event_data.coredump.process_pid = task->pid;
	ev->event_data.coredump.process_tgid = task->tgid;

	/* Only dereference real_parent while the task is still pid-alive. */
	rcu_read_lock();
	if (pid_alive(task)) {
		parent = rcu_dereference(task->real_parent);
		ev->event_data.coredump.parent_pid = parent->pid;
		ev->event_data.coredump.parent_tgid = parent->tgid;
	}
	rcu_read_unlock();

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
/*
 * proc_exit_connector - report a task exit (PROC_EVENT_EXIT).
 * @task: the exiting task; exit_code/exit_signal are read directly
 *        from the task struct.
 *
 * As with the coredump event, parent pid/tgid are reported only while
 * the task is still pid-alive; otherwise they remain zero.
 */
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct task_struct *parent;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	/* Only dereference real_parent while the task is still pid-alive. */
	rcu_read_lock();
	if (pid_alive(task)) {
		parent = rcu_dereference(task->real_parent);
		ev->event_data.exit.parent_pid = parent->pid;
		ev->event_data.exit.parent_tgid = parent->tgid;
	}
	rcu_read_unlock();

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	/* Nothing to ack if there was never a listener registered. */
	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	/*
	 * NOTE(review): send_msg() unconditionally overwrites msg->seq with
	 * its per-CPU counter, so this echo of the received seq never reaches
	 * the wire — confirm whether that is intended.
	 */
	msg->seq = rcvd_seq;
	ev->timestamp_ns = ktime_get_ns();
	ev->cpu = -1;	/* also overwritten by send_msg() with the real CPU */
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	/* Ack convention: reply carries the received ack number plus one. */
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	send_msg(msg);
}
/**
 * cn_proc_mcast_ctl
 * @msg: message sent from userspace via the connector
 * @nsp: netlink parameters of the sending socket, used for the
 *       capability check
 *
 * Handles PROC_CN_MCAST_LISTEN / PROC_CN_MCAST_IGNORE subscription
 * requests.  Requests from outside the initial pid/user namespaces are
 * dropped without a reply; everything else is acked with 0 or a
 * positive E* value via cn_proc_ack().
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	/* Payload must be exactly one proc_cn_mcast_op. */
	if (msg->len != sizeof(*mc_op))
		return;

	/*
	 * Events are reported with respect to the initial pid
	 * and user namespaces so ignore requestors from
	 * other namespaces.
	 */
	if ((current_user_ns() != &init_user_ns) ||
	    !task_is_in_init_pid_ns(current))
		return;

	/* Can only change if privileged. */
	if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
		err = EPERM;
		goto out;
	}

	mc_op = (enum proc_cn_mcast_op *)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		/*
		 * NOTE(review): an IGNORE with no prior LISTEN drives the
		 * listener count negative, suppressing events until enough
		 * LISTENs arrive — confirm this imbalance is acceptable.
		 */
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}

out:
	cn_proc_ack(err, msg->seq, msg->ack);
}
  333. /*
  334. * cn_proc_init - initialization entry point
  335. *
  336. * Adds the connector callback to the connector driver.
  337. */
  338. static int __init cn_proc_init(void)
  339. {
  340. int err = cn_add_callback(&cn_proc_event_id,
  341. "cn_proc",
  342. &cn_proc_mcast_ctl);
  343. if (err) {
  344. pr_warn("cn_proc failed to register\n");
  345. return err;
  346. }
  347. return 0;
  348. }
  349. device_initcall(cn_proc_init);