  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* /proc/net/ support for AF_RXRPC
  3. *
  4. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells ([email protected])
  6. */
  7. #include <linux/module.h>
  8. #include <net/sock.h>
  9. #include <net/af_rxrpc.h>
  10. #include "ar-internal.h"
/* Display names for the connection states, indexed by enum
 * rxrpc_conn_state; used for the State column of /proc/net/rxrpc_conns.
 */
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused ",
	[RXRPC_CONN_CLIENT]			= "Client ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};
/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	/* The walk over rxnet->calls is RCU-protected; the read lock is
	 * dropped in rxrpc_call_seq_stop().
	 */
	rcu_read_lock();
	return seq_list_start_head_rcu(&rxnet->calls, *_pos);
}
  31. static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  32. {
  33. struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
  34. return seq_list_next_rcu(v, &rxnet->calls, pos);
  35. }
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	/* Pairs with rcu_read_lock() in rxrpc_call_seq_start(). */
	rcu_read_unlock();
}
/*
 * Emit one row of /proc/net/rxrpc_calls, or the column header when @v
 * is the list head.  Runs under the RCU read lock taken in ->start().
 */
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

	if (v == &rxnet->calls) {
		/* First iteration: print the header line. */
		seq_puts(seq,
			 "Proto Local "
			 " Remote "
			 " SvID ConnID CallID End Use State Abort "
			 " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	/* The call's socket and its local endpoint may be torn down
	 * concurrently, so sample each once and substitute placeholder
	 * text if either is gone.
	 */
	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

	/* Preallocated service calls aren't yet expecting any RX packet,
	 * so the expiry column is only meaningful for other states.
	 */
	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;	/* time remaining until RX expiry */
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   refcount_read(&call->ref),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->debug_id,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);
	return 0;
}
/* seq_file iterator operations for /proc/net/rxrpc_calls */
const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	/* Read-lock the connection list; released in ->stop(). */
	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}
  116. static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
  117. loff_t *pos)
  118. {
  119. struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
  120. return seq_list_next(v, &rxnet->conn_proc_list, pos);
  121. }
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	/* Pairs with read_lock() in rxrpc_connection_seq_start(). */
	read_unlock(&rxnet->conn_lock);
}
/*
 * Emit one row of /proc/net/rxrpc_conns, or the column header when @v
 * is the list head.  Called with rxnet->conn_lock read-held.
 */
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local "
			 " Remote "
			 " SvID ConnID End Use State Key "
			 " Serial ISerial CallId0 CallId1 CallId2 CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		/* Preallocated service conns have no endpoints assigned
		 * yet, so print placeholders for both addresses.
		 */
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);
	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   refcount_read(&conn->ref),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);
	return 0;
}
/* seq_file iterator operations for /proc/net/rxrpc_conns */
const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};
/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		/* First iteration: print the header line. */
		seq_puts(seq,
			 "Proto Local "
			 " Remote "
			 " Use CW MTU LastUse RTT RTO\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);
	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %8u %8u\n",
		   lbuff,
		   rbuff,
		   refcount_read(&peer->ref),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,	/* seconds since last transmit */
		   peer->srtt_us >> 3,		/* smoothed RTT (usec) */
		   jiffies_to_usecs(peer->rto_j));
	return 0;
}
/*
 * Begin iteration over the peer hash table under RCU.
 *
 * The seq_file position encodes both coordinates: the bucket number in
 * the top bits and (intra-bucket index + 1) in the low @shift bits.
 * Slot 0 of bucket 0 stands for the header row, and UINT_MAX marks a
 * completed traversal.
 */
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;	/* previous walk already finished */

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			/* Slot 0 of the first bucket is the header. */
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;

		/* Nothing (left) in this bucket; move to the next one. */
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}
  240. static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
  241. {
  242. struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
  243. unsigned int bucket, n;
  244. unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
  245. void *p;
  246. if (*_pos >= UINT_MAX)
  247. return NULL;
  248. bucket = *_pos >> shift;
  249. p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
  250. if (p)
  251. return p;
  252. for (;;) {
  253. bucket++;
  254. n = 1;
  255. *_pos = (bucket << shift) | n;
  256. if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
  257. *_pos = UINT_MAX;
  258. return NULL;
  259. }
  260. if (n == 0) {
  261. *_pos += 1;
  262. n++;
  263. }
  264. p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
  265. if (p)
  266. return p;
  267. }
  268. }
static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	/* Pairs with rcu_read_lock() in rxrpc_peer_seq_start(). */
	rcu_read_unlock();
}
/* seq_file iterator operations for /proc/net/rxrpc/peers */
const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};
/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	char lbuff[50];

	if (v == SEQ_START_TOKEN) {
		/* First iteration: print the header line. */
		seq_puts(seq,
			 "Proto Local "
			 " Use Act\n");
		return 0;
	}

	local = hlist_entry(v, struct rxrpc_local, link);

	sprintf(lbuff, "%pISpc", &local->srx.transport);

	seq_printf(seq,
		   "UDP %-47.47s %3u %3u\n",
		   lbuff,
		   refcount_read(&local->ref),
		   atomic_read(&local->active_users));
	return 0;
}
/*
 * Begin /proc/net/rxrpc/locals iteration under RCU: position 0 is the
 * header row, position n > 0 is the (n - 1)th endpoint in the list, and
 * UINT_MAX marks a completed walk.
 */
static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int n;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos;
	if (n == 0)
		return SEQ_START_TOKEN;

	return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
}
  315. static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
  316. {
  317. struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
  318. if (*_pos >= UINT_MAX)
  319. return NULL;
  320. return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
  321. }
static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	/* Pairs with rcu_read_lock() in rxrpc_local_seq_start(). */
	rcu_read_unlock();
}
/* seq_file iterator operations for /proc/net/rxrpc/locals */
const struct seq_operations rxrpc_local_seq_ops = {
	.start  = rxrpc_local_seq_start,
	.next   = rxrpc_local_seq_next,
	.stop   = rxrpc_local_seq_stop,
	.show   = rxrpc_local_seq_show,
};