// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msm_sysstats.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
#include <linux/sched/cputime.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/fdtable.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/qcom_dma_heap.h>

struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};

static struct genl_family family;

static u64 (*sysstats_kgsl_get_stats)(pid_t pid);

static DEFINE_PER_CPU(__u32, sysstats_seqnum);

#define SYSSTATS_CMD_ATTR_MAX 3

static const struct nla_policy sysstats_cmd_get_policy[SYSSTATS_CMD_ATTR_MAX + 1] = {
	[SYSSTATS_TASK_CMD_ATTR_PID] = { .type = NLA_U32 },
	[SYSSTATS_TASK_CMD_ATTR_FOREACH] = { .type = NLA_U32 },
	[SYSSTATS_TASK_CMD_ATTR_PIDS_OF_NAME] = { .type = NLA_NUL_STRING },
};

/*
 * The dummy function below is the default callback, so the callback
 * pointer can always be called without any external synchronization.
 */
static u64 sysstats_kgsl_stats(pid_t pid)
{
	return 0;
}

void sysstats_register_kgsl_stats_cb(u64 (*cb)(pid_t pid))
{
	sysstats_kgsl_get_stats = cb;
}
EXPORT_SYMBOL(sysstats_register_kgsl_stats_cb);

void sysstats_unregister_kgsl_stats_cb(void)
{
	sysstats_kgsl_get_stats = sysstats_kgsl_stats;
}
EXPORT_SYMBOL(sysstats_unregister_kgsl_stats_cb);
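
/*
 * Pick the attribute policy for the requested command and validate the
 * incoming message before the doit handler is invoked.
 */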
static int sysstats_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
			     struct genl_info *info)
{
	const struct nla_policy *policy = NULL;

	switch (ops->cmd) {
	case SYSSTATS_TASK_CMD_GET:
	case SYSSTATS_PIDS_CMD_GET:
		policy = sysstats_cmd_get_policy;
		break;
	case SYSSTATS_MEMINFO_CMD_GET:
		break;
	default:
		return -EINVAL;
	}

	return nlmsg_validate_deprecated(info->nlhdr, GENL_HDRLEN,
					 SYSSTATS_CMD_ATTR_MAX, policy,
					 info->extack);
}

static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);

	genlmsg_end(skb, reply);
	return genlmsg_reply(skb, info);
}
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			 size_t size)
{
	struct sk_buff *skb;
	void *reply;

	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	if (!info) {
		int seq = this_cpu_inc_return(sysstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else {
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	}
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}
	*skbp = skb;
	return 0;
}
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();
	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();
	return t;
}
static struct sighand_struct *sysstats_lock_task_sighand(struct task_struct *tsk,
							 unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();
	return sighand;
}

static bool is_system_dmabufheap(struct dma_buf *dmabuf)
{
	if (!strcmp(dmabuf->exp_name, "qcom,system") ||
	    !strcmp(dmabuf->exp_name, "qcom,system-uncached") ||
	    !strcmp(dmabuf->exp_name, "system-secure") ||
	    !strcmp(dmabuf->exp_name, "qcom,secure-pixel") ||
	    !strcmp(dmabuf->exp_name, "qcom,secure-non-pixel"))
		return true;
	return false;
}

static int get_dma_info(const void *data, struct file *file, unsigned int n)
{
	struct dma_buf *dmabuf;
	unsigned long *size = (unsigned long *)data;

	if (!qcom_is_dma_buf_file(file))
		return 0;
	dmabuf = (struct dma_buf *)file->private_data;
	if (is_system_dmabufheap(dmabuf))
		*size += dmabuf->size;
	return 0;
}
static unsigned long get_task_unreclaimable_info(struct task_struct *task)
{
	struct task_struct *thread;
	struct files_struct *files;
	struct files_struct *group_leader_files = NULL;
	unsigned long size = 0;
	int ret = 0;

	for_each_thread(task, thread) {
		/* @task is already locked; don't lock/unlock it again. */
		if (task != thread)
			task_lock(thread);
		if (unlikely(!group_leader_files))
			group_leader_files = task->group_leader->files;
		files = thread->files;
		if (files && (group_leader_files != files ||
			      thread == task->group_leader))
			ret = iterate_fd(files, 0, get_dma_info, &size);
		if (task != thread)
			task_unlock(thread);
		if (ret)
			break;
	}
	return size >> PAGE_SHIFT;
}
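
/*
 * System-wide unreclaimable total: dma-buf allocations from the system
 * heaps across all processes plus the global kgsl usage, in pages.
 */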
static unsigned long get_system_unreclaimble_info(void)
{
	struct task_struct *task;
	unsigned long size = 0;

	rcu_read_lock();
	for_each_process(task) {
		task_lock(task);
		size += get_task_unreclaimable_info(task);
		task_unlock(task);
	}
	rcu_read_unlock();

	/* Account for the kgsl information as well. */
	size += sysstats_kgsl_get_stats(-1) >> PAGE_SHIFT;
	return size;
}

static char *nla_strdup_cust(const struct nlattr *nla, gfp_t flags)
{
	size_t srclen = nla_len(nla);
	char *src = nla_data(nla), *dst;

	if (srclen > 0 && src[srclen - 1] == '\0')
		srclen--;
	dst = kmalloc(srclen + 1, flags);
	if (dst != NULL) {
		memcpy(dst, src, srclen);
		dst[srclen] = '\0';
	}
	return dst;
}
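
/*
 * Fill a struct sysstats_task for the pid supplied in the request and
 * send it back as a SYSSTATS_TASK_CMD_NEW reply.
 */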
static int sysstats_task_cmd_attr_pid(struct genl_info *info)
{
	struct sysstats_task *stats;
	struct sk_buff *rep_skb;
	struct nlattr *ret;
	struct task_struct *tsk;
	struct task_struct *p;
	size_t size;
	u32 pid;
	int rc;
	u64 utime, stime;
	const struct cred *tcred;
#ifdef CONFIG_CPUSETS
	struct cgroup_subsys_state *css;
#endif
	unsigned long flags;
	struct signal_struct *sig;

	size = nla_total_size_64bit(sizeof(struct sysstats_task));
	rc = prepare_reply(info, SYSSTATS_TASK_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[SYSSTATS_TASK_CMD_ATTR_PID]);
	ret = nla_reserve_64bit(rep_skb, SYSSTATS_TASK_TYPE_STATS,
				sizeof(struct sysstats_task), SYSSTATS_TYPE_NULL);
	if (!ret)
		goto err;
	stats = nla_data(ret);

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk) {
		rc = -ESRCH;
		goto err;
	}

	memset(stats, 0, sizeof(*stats));
	stats->pid = task_pid_nr_ns(tsk, task_active_pid_ns(current));
	p = find_lock_task_mm(tsk);
	if (p) {
		__acquire(p->alloc_lock);
#define K(x) ((x) << (PAGE_SHIFT - 10))
		stats->anon_rss = K(get_mm_counter(p->mm, MM_ANONPAGES));
		stats->file_rss = K(get_mm_counter(p->mm, MM_FILEPAGES));
		stats->shmem_rss = K(get_mm_counter(p->mm, MM_SHMEMPAGES));
		stats->swap_rss = K(get_mm_counter(p->mm, MM_SWAPENTS));
		stats->unreclaimable = K(get_task_unreclaimable_info(p));
#undef K
		task_unlock(p);
	}
	stats->unreclaimable += sysstats_kgsl_get_stats(stats->pid) >> 10;

	task_cputime(tsk, &utime, &stime);
	stats->utime = div_u64(utime, NSEC_PER_USEC);
	stats->stime = div_u64(stime, NSEC_PER_USEC);

	if (sysstats_lock_task_sighand(tsk, &flags)) {
		sig = tsk->signal;
		stats->cutime = sig->cutime;
		stats->cstime = sig->cstime;
		unlock_task_sighand(tsk, &flags);
	}

	rcu_read_lock();
	tcred = __task_cred(tsk);
	stats->uid = from_kuid_munged(current_user_ns(), tcred->uid);
	stats->ppid = pid_alive(tsk) ?
		task_tgid_nr_ns(rcu_dereference(tsk->real_parent),
				task_active_pid_ns(current)) : 0;
	rcu_read_unlock();

	strscpy(stats->name, tsk->comm, sizeof(stats->name));

#ifdef CONFIG_CPUSETS
	css = task_get_css(tsk, cpuset_cgrp_id);
	cgroup_path_ns(css->cgroup, stats->state, sizeof(stats->state),
		       current->nsproxy->cgroup_ns);
	css_put(css);
#endif

	put_task_struct(tsk);
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int sysstats_task_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[SYSSTATS_TASK_CMD_ATTR_PID])
		return sysstats_task_cmd_attr_pid(info);
	else
		return -EINVAL;
}
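
/*
 * Advance to the next thread group leader in @ns, taking a reference on
 * the returned task and dropping the reference taken on the previous
 * iteration.
 */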
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = idr_get_next(&ns->idr, &iter.tgid);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_TGID);
		if (!iter.task) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}
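
/*
 * Dump callback: report the pid of every thread group whose comm matches
 * the name supplied in SYSSTATS_TASK_CMD_ATTR_PIDS_OF_NAME.
 */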
static int sysstats_all_pids_of_name(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct pid_namespace *ns = task_active_pid_ns(current);
	struct tgid_iter iter;
	void *reply;
	struct nlattr *attr;
	struct nlattr *nla;
	struct sysstats_pid *stats;
	char *comm;

	nla = nla_find(nlmsg_attrdata(cb->nlh, GENL_HDRLEN),
		       nlmsg_attrlen(cb->nlh, GENL_HDRLEN),
		       SYSSTATS_TASK_CMD_ATTR_PIDS_OF_NAME);
	if (!nla)
		goto out;

	comm = nla_strdup_cust(nla, GFP_KERNEL);
	if (!comm)
		goto out;

	iter.tgid = cb->args[0];
	iter.task = NULL;
	for (iter = next_tgid(ns, iter); iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		if (strcmp(iter.task->comm, comm))
			continue;
		reply = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, &family, 0, SYSSTATS_PIDS_CMD_GET);
		if (reply == NULL) {
			put_task_struct(iter.task);
			break;
		}
		attr = nla_reserve(skb, SYSSTATS_PID_TYPE_STATS,
				   sizeof(struct sysstats_pid));
		if (!attr) {
			put_task_struct(iter.task);
			genlmsg_cancel(skb, reply);
			break;
		}
		stats = nla_data(attr);
		memset(stats, 0, sizeof(struct sysstats_pid));
		rcu_read_lock();
		stats->pid = task_pid_nr_ns(iter.task,
					    task_active_pid_ns(current));
		rcu_read_unlock();
		genlmsg_end(skb, reply);
	}
	cb->args[0] = iter.tgid;
	kfree(comm);
out:
	return skb->len;
}
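
/*
 * Dump callback: walk every user-space thread group whose oom_score_adj
 * falls inside the requested [min, max] window and emit its memory stats.
 */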
static int sysstats_task_foreach(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct pid_namespace *ns = task_active_pid_ns(current);
	struct tgid_iter iter;
	void *reply;
	struct nlattr *attr;
	struct nlattr *nla;
	struct sysstats_task *stats;
	struct task_struct *p;
	short oom_score;
	short oom_score_min;
	short oom_score_max;
	u32 buf;

	nla = nla_find(nlmsg_attrdata(cb->nlh, GENL_HDRLEN),
		       nlmsg_attrlen(cb->nlh, GENL_HDRLEN),
		       SYSSTATS_TASK_CMD_ATTR_FOREACH);
	if (!nla)
		goto out;

	buf = nla_get_u32(nla);
	oom_score_min = (short) (buf & 0xFFFF);
	oom_score_max = (short) ((buf >> 16) & 0xFFFF);

	iter.tgid = cb->args[0];
	iter.task = NULL;
	for (iter = next_tgid(ns, iter); iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		if (iter.task->flags & PF_KTHREAD)
			continue;
		oom_score = iter.task->signal->oom_score_adj;
		if ((oom_score < oom_score_min)
		    || (oom_score > oom_score_max))
			continue;
		reply = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, &family, 0, SYSSTATS_TASK_CMD_GET);
		if (reply == NULL) {
			put_task_struct(iter.task);
			break;
		}
		attr = nla_reserve(skb, SYSSTATS_TASK_TYPE_FOREACH,
				   sizeof(struct sysstats_task));
		if (!attr) {
			put_task_struct(iter.task);
			genlmsg_cancel(skb, reply);
			break;
		}
		stats = nla_data(attr);
		memset(stats, 0, sizeof(struct sysstats_task));
		rcu_read_lock();
		stats->pid = task_pid_nr_ns(iter.task,
					    task_active_pid_ns(current));
		stats->oom_score = iter.task->signal->oom_score_adj;
		rcu_read_unlock();
		p = find_lock_task_mm(iter.task);
		if (p) {
#define K(x) ((x) << (PAGE_SHIFT - 10))
			__acquire(p->alloc_lock);
			stats->anon_rss =
				K(get_mm_counter(p->mm, MM_ANONPAGES));
			stats->file_rss =
				K(get_mm_counter(p->mm, MM_FILEPAGES));
			stats->shmem_rss =
				K(get_mm_counter(p->mm, MM_SHMEMPAGES));
			stats->swap_rss =
				K(get_mm_counter(p->mm, MM_SWAPENTS));
			stats->unreclaimable = K(get_task_unreclaimable_info(p));
			task_unlock(p);
#undef K
		}
		genlmsg_end(skb, reply);
	}
	cb->args[0] = iter.tgid;
out:
	return skb->len;
}
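
/*
 * For the non-NUMA case, fill the per-zone free/active/inactive counters
 * for node 0 along with the zsmalloc (zram compressed) footprint; K()
 * converts pages to kilobytes.
 */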
#define K(x) ((x) << (PAGE_SHIFT - 10))
#ifndef CONFIG_NUMA
static void sysstats_fill_zoneinfo(struct sysstats_mem *stats)
{
	pg_data_t *pgdat;
	struct zone *zone;
	struct zone *node_zones;
	unsigned long zspages = 0;

	pgdat = NODE_DATA(0);
	node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		zspages += zone_page_state(zone, NR_ZSPAGES);
		if (!strcmp(zone->name, "DMA")) {
			stats->dma_nr_free =
				K(zone_page_state(zone, NR_FREE_PAGES));
			stats->dma_nr_active_anon =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
			stats->dma_nr_inactive_anon =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
			stats->dma_nr_active_file =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
			stats->dma_nr_inactive_file =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
		} else if (!strcmp(zone->name, "Normal")) {
			stats->normal_nr_free =
				K(zone_page_state(zone, NR_FREE_PAGES));
			stats->normal_nr_active_anon =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
			stats->normal_nr_inactive_anon =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
			stats->normal_nr_active_file =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
			stats->normal_nr_inactive_file =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
		} else if (!strcmp(zone->name, "HighMem")) {
			stats->highmem_nr_free =
				K(zone_page_state(zone, NR_FREE_PAGES));
			stats->highmem_nr_active_anon =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
			stats->highmem_nr_inactive_anon =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
			stats->highmem_nr_active_file =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
			stats->highmem_nr_inactive_file =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
		} else if (!strcmp(zone->name, "Movable")) {
			stats->movable_nr_free =
				K(zone_page_state(zone, NR_FREE_PAGES));
			stats->movable_nr_active_anon =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON));
			stats->movable_nr_inactive_anon =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON));
			stats->movable_nr_active_file =
				K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE));
			stats->movable_nr_inactive_file =
				K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE));
		}
	}
	stats->zram_compressed = K(zspages);
}
#else
static void sysstats_fill_zoneinfo(struct sysstats_mem *stats)
{
}
#endif
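
/*
 * Gather the system-wide memory statistics reported by
 * SYSSTATS_MEMINFO_CMD_GET, in kilobytes.
 */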
static void sysstats_build(struct sysstats_mem *stats)
{
	struct sysinfo i;

	si_meminfo(&i);
#ifndef CONFIG_MSM_SYSSTATS_STUB_NONEXPORTED_SYMBOLS
	si_swapinfo(&i);
	stats->swap_used = K(i.totalswap - i.freeswap);
	stats->swap_total = K(i.totalswap);
	stats->vmalloc_total = K(vmalloc_nr_pages());
#else
	stats->swap_used = 0;
	stats->swap_total = 0;
	stats->vmalloc_total = 0;
#endif
	stats->memtotal = K(i.totalram);
	stats->misc_reclaimable =
		K(global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE));
	stats->unreclaimable = K(get_system_unreclaimble_info());
	stats->buffer = K(i.bufferram);
	stats->swapcache = K(total_swapcache_pages());
	stats->slab_reclaimable =
		K(global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B));
	stats->slab_unreclaimable =
		K(global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B));
	stats->free_cma = K(global_zone_page_state(NR_FREE_CMA_PAGES));
	stats->file_mapped = K(global_node_page_state(NR_FILE_MAPPED));
	stats->kernelstack = global_node_page_state(NR_KERNEL_STACK_KB);
	stats->pagetable = K(global_node_page_state(NR_PAGETABLE));
	stats->shmem = K(i.sharedram);
	sysstats_fill_zoneinfo(stats);
}
#undef K
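
/*
 * doit handler for SYSSTATS_MEMINFO_CMD_GET: build the meminfo snapshot
 * and send it back as a SYSSTATS_MEMINFO_CMD_NEW reply.
 */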
static int sysstats_meminfo_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct sysstats_mem *stats;
	struct nlattr *na;
	size_t size;

	size = nla_total_size(sizeof(struct sysstats_mem));
	rc = prepare_reply(info, SYSSTATS_MEMINFO_CMD_NEW, &rep_skb,
			   size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, SYSSTATS_MEMINFO_TYPE_STATS,
			 sizeof(struct sysstats_mem));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}
	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	sysstats_build(stats);

	rc = send_reply(rep_skb, info);
err:
	return rc;
}

static const struct genl_ops sysstats_ops[] = {
	{
		.cmd = SYSSTATS_TASK_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = sysstats_task_user_cmd,
		.dumpit = sysstats_task_foreach,
	},
	{
		.cmd = SYSSTATS_MEMINFO_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = sysstats_meminfo_user_cmd,
	},
	{
		.cmd = SYSSTATS_PIDS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = sysstats_all_pids_of_name,
	}
};

static struct genl_family family __ro_after_init = {
	.name = SYSSTATS_GENL_NAME,
	.version = SYSSTATS_GENL_VERSION,
	.maxattr = SYSSTATS_CMD_ATTR_MAX,
	.module = THIS_MODULE,
	.ops = sysstats_ops,
	.n_ops = ARRAY_SIZE(sysstats_ops),
	.pre_doit = sysstats_pre_doit,
	.resv_start_op = SYSSTATS_PIDS_CMD_GET + 1,
};

static int __init sysstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	sysstats_register_kgsl_stats_cb(sysstats_kgsl_stats);
	pr_info("registered sysstats version %d\n", SYSSTATS_GENL_VERSION);
	return 0;
}

static void __exit sysstats_exit(void)
{
	genl_unregister_family(&family);
}

module_init(sysstats_init);
module_exit(sysstats_exit);
MODULE_IMPORT_NS(MINIDUMP);
MODULE_LICENSE("GPL v2");