net-procfs.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>

#include "dev.h"

#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
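
/*
 * The seq_file position used for /proc/net/dev packs a dev_index_head
 * bucket number and a 1-based offset within that bucket into a single
 * loff_t, using the macros above.
 */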
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        struct net_device *dev;
        struct hlist_head *h;
        unsigned int count = 0, offset = get_offset(*pos);

        h = &net->dev_index_head[get_bucket(*pos)];
        hlist_for_each_entry_rcu(dev, h, index_hlist) {
                if (++count == offset)
                        return dev;
        }

        return NULL;
}

static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
        struct net_device *dev;
        unsigned int bucket;

        do {
                dev = dev_from_same_bucket(seq, pos);
                if (dev)
                        return dev;

                bucket = get_bucket(*pos) + 1;
                *pos = set_bucket_offset(bucket, 1);
        } while (bucket < NETDEV_HASHENTRIES);

        return NULL;
}

/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        if (!*pos)
                return SEQ_START_TOKEN;

        if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
                return NULL;

        return dev_from_bucket(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}
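
/*
 * Emit one /proc/net/dev line for @dev, folding the detailed
 * rtnl_link_stats64 error counters into the legacy column set
 * (e.g. rx_dropped + rx_missed_errors become the "drop" column).
 */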
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

        seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
                   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
                   dev->name, stats->rx_bytes, stats->rx_packets,
                   stats->rx_errors,
                   stats->rx_dropped + stats->rx_missed_errors,
                   stats->rx_fifo_errors,
                   stats->rx_length_errors + stats->rx_over_errors +
                    stats->rx_crc_errors + stats->rx_frame_errors,
                   stats->rx_compressed, stats->multicast,
                   stats->tx_bytes, stats->tx_packets,
                   stats->tx_errors, stats->tx_dropped,
                   stats->tx_fifo_errors, stats->collisions,
                   stats->tx_carrier_errors +
                    stats->tx_aborted_errors +
                    stats->tx_window_errors +
                    stats->tx_heartbeat_errors,
                   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Inter-|   Receive                            "
                              "                    |  Transmit\n"
                              " face |bytes    packets errs drop fifo frame "
                              "compressed multicast|bytes    packets errs "
                              "drop fifo colls carrier compressed\n");
        else
                dev_seq_printf_stats(seq, v);
        return 0;
}

static u32 softnet_backlog_len(struct softnet_data *sd)
{
        return skb_queue_len_lockless(&sd->input_pkt_queue) +
               skb_queue_len_lockless(&sd->process_queue);
}
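
/*
 * Map a /proc/net/softnet_stat position onto the softnet_data of the
 * next online CPU, advancing *pos past offline CPUs; returns NULL once
 * every online CPU has been visited.
 */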
static struct softnet_data *softnet_get_online(loff_t *pos)
{
        struct softnet_data *sd = NULL;

        while (*pos < nr_cpu_ids)
                if (cpu_online(*pos)) {
                        sd = &per_cpu(softnet_data, *pos);
                        break;
                } else
                        ++*pos;
        return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
        return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
        struct softnet_data *sd = v;
        unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
        struct sd_flow_limit *fl;

        rcu_read_lock();
        fl = rcu_dereference(sd->flow_limit);
        if (fl)
                flow_limit_count = fl->count;
        rcu_read_unlock();
#endif

        /* the index is the CPU id owning this sd. Since offline CPUs are
         * not displayed, it would otherwise not be trivial for user space
         * to map the data to a specific CPU.
         */
        seq_printf(seq,
                   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
                   sd->processed, sd->dropped, sd->time_squeeze, 0,
                   0, 0, 0, 0, /* was fastroute */
                   0,          /* was cpu_collision */
                   sd->received_rps, flow_limit_count,
                   softnet_backlog_len(sd), (int)seq->index);
        return 0;
}

static const struct seq_operations dev_seq_ops = {
        .start = dev_seq_start,
        .next  = dev_seq_next,
        .stop  = dev_seq_stop,
        .show  = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
        .start = softnet_seq_start,
        .next  = softnet_seq_next,
        .stop  = softnet_seq_stop,
        .show  = softnet_seq_show,
};
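
/*
 * Find the pos'th packet_type for /proc/net/ptype, walking the
 * per-device ptype_all lists first, then the global ptype_all list,
 * and finally every chain of the ptype_base hash table.
 */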
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
        struct list_head *ptype_list = NULL;
        struct packet_type *pt = NULL;
        struct net_device *dev;
        loff_t i = 0;
        int t;

        for_each_netdev_rcu(seq_file_net(seq), dev) {
                ptype_list = &dev->ptype_all;
                list_for_each_entry_rcu(pt, ptype_list, list) {
                        if (i == pos)
                                return pt;
                        ++i;
                }
        }

        list_for_each_entry_rcu(pt, &ptype_all, list) {
                if (i == pos)
                        return pt;
                ++i;
        }

        for (t = 0; t < PTYPE_HASH_SIZE; t++) {
                list_for_each_entry_rcu(pt, &ptype_base[t], list) {
                        if (i == pos)
                                return pt;
                        ++i;
                }
        }

        return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net_device *dev;
        struct packet_type *pt;
        struct list_head *nxt;
        int hash;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ptype_get_idx(seq, 0);

        pt = v;
        nxt = pt->list.next;
        if (pt->dev) {
                if (nxt != &pt->dev->ptype_all)
                        goto found;

                dev = pt->dev;
                for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
                        if (!list_empty(&dev->ptype_all)) {
                                nxt = dev->ptype_all.next;
                                goto found;
                        }
                }

                nxt = ptype_all.next;
                goto ptype_all;
        }

        if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
                if (nxt != &ptype_all)
                        goto found;
                hash = 0;
                nxt = ptype_base[0].next;
        } else
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;

        while (nxt == &ptype_base[hash]) {
                if (++hash >= PTYPE_HASH_SIZE)
                        return NULL;
                nxt = ptype_base[hash].next;
        }
found:
        return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
        struct packet_type *pt = v;

        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Type Device      Function\n");
        else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
                 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
                if (pt->type == htons(ETH_P_ALL))
                        seq_puts(seq, "ALL ");
                else
                        seq_printf(seq, "%04x", ntohs(pt->type));

                seq_printf(seq, " %-8s %ps\n",
                           pt->dev ? pt->dev->name : "", pt->func);
        }

        return 0;
}

static const struct seq_operations ptype_seq_ops = {
        .start = ptype_seq_start,
        .next  = ptype_seq_next,
        .stop  = ptype_seq_stop,
        .show  = ptype_seq_show,
};

static int __net_init dev_proc_net_init(struct net *net)
{
        int rc = -ENOMEM;

        if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
                             sizeof(struct seq_net_private)))
                goto out;
        if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
                             &softnet_seq_ops))
                goto out_dev;
        if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
                             sizeof(struct seq_net_private)))
                goto out_softnet;

        if (wext_proc_init(net))
                goto out_ptype;
        rc = 0;
out:
        return rc;
out_ptype:
        remove_proc_entry("ptype", net->proc_net);
out_softnet:
        remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
        remove_proc_entry("dev", net->proc_net);
        goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
        wext_proc_exit(net);

        remove_proc_entry("ptype", net->proc_net);
        remove_proc_entry("softnet_stat", net->proc_net);
        remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
        .init = dev_proc_net_init,
        .exit = dev_proc_net_exit,
};
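
/*
 * /proc/net/dev_mcast: one line per multicast hardware address, printed
 * as "ifindex name refcount global_use address", with the address in hex.
 */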
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
        struct netdev_hw_addr *ha;
        struct net_device *dev = v;

        if (v == SEQ_START_TOKEN)
                return 0;

        netif_addr_lock_bh(dev);
        netdev_for_each_mc_addr(ha, dev) {
                seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
                           dev->ifindex, dev->name,
                           ha->refcount, ha->global_use,
                           (int)dev->addr_len, ha->addr);
        }
        netif_addr_unlock_bh(dev);
        return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
        .start = dev_seq_start,
        .next  = dev_seq_next,
        .stop  = dev_seq_stop,
        .show  = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
        if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
                             sizeof(struct seq_net_private)))
                return -ENOMEM;
        return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
        remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
        .init = dev_mc_net_init,
        .exit = dev_mc_net_exit,
};

int __init dev_proc_init(void)
{
        int ret = register_pernet_subsys(&dev_proc_ops);

        if (!ret)
                return register_pernet_subsys(&dev_mc_net_ops);

        return ret;
}