// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * SMC statistics netlink routines
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_netlink.h"
#include "smc_stats.h"
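
/* Allocate the per-netns fallback reason array and the per-CPU SMC
 * statistics counters. On allocation failure, free what was already
 * allocated and return -ENOMEM.
 */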
int smc_stats_init(struct net *net)
{
	net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
	if (!net->smc.fback_rsn)
		goto err_fback;
	net->smc.smc_stats = alloc_percpu(struct smc_stats);
	if (!net->smc.smc_stats)
		goto err_stats;
	mutex_init(&net->smc.mutex_fback_rsn);
	return 0;

err_stats:
	kfree(net->smc.fback_rsn);
err_fback:
	return -ENOMEM;
}
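
/* Free the fallback reason array and the per-CPU statistics counters. */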
void smc_stats_exit(struct net *net)
{
	kfree(net->smc.fback_rsn);
	if (net->smc.smc_stats)
		free_percpu(net->smc.smc_stats);
}
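
/* Nest the TX or RX RMB counters (reuse, too-small, full, alloc, downgrade)
 * of one SMC technology under the given attribute type.
 */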
static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
				      struct smc_stats *stats, int tech,
				      int type)
{
	struct smc_stats_rmbcnt *stats_rmb_cnt;
	struct nlattr *attrs;

	if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
		stats_rmb_cnt = &stats->smc[tech].rmb_tx;
	else
		stats_rmb_cnt = &stats->smc[tech].rmb_rx;

	attrs = nla_nest_start(skb, type);
	if (!attrs)
		goto errout;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
			      stats_rmb_cnt->reuse_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
			      stats_rmb_cnt->buf_size_small_peer_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
			      stats_rmb_cnt->buf_size_small_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
			      stats_rmb_cnt->buf_full_peer_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
			      stats_rmb_cnt->buf_full_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
			      stats_rmb_cnt->alloc_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
			      stats_rmb_cnt->dgrade_cnt,
			      SMC_NLA_STATS_RMB_PAD))
		goto errattr;

	nla_nest_end(skb, attrs);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}
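
/* Nest one buffer size histogram (8K up to >1024K buckets), selected by
 * 'type': TX/RX payload sizes or TX/RX RMB sizes.
 */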
static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
					  struct smc_stats *stats, int tech,
					  int type)
{
	struct smc_stats_memsize *stats_pload;
	struct nlattr *attrs;

	if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
		stats_pload = &stats->smc[tech].tx_pd;
	else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
		stats_pload = &stats->smc[tech].rx_pd;
	else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
		stats_pload = &stats->smc[tech].tx_rmbsize;
	else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
		stats_pload = &stats->smc[tech].rx_rmbsize;
	else
		goto errout;

	attrs = nla_nest_start(skb, type);
	if (!attrs)
		goto errout;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
			      stats_pload->buf[SMC_BUF_8K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
			      stats_pload->buf[SMC_BUF_16K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
			      stats_pload->buf[SMC_BUF_32K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
			      stats_pload->buf[SMC_BUF_64K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
			      stats_pload->buf[SMC_BUF_128K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
			      stats_pload->buf[SMC_BUF_256K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
			      stats_pload->buf[SMC_BUF_512K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
			      stats_pload->buf[SMC_BUF_1024K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
			      stats_pload->buf[SMC_BUF_G_1024K],
			      SMC_NLA_STATS_PLOAD_PAD))
		goto errattr;

	nla_nest_end(skb, attrs);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}
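
/* Emit all counters of one technology (SMC-D or SMC-R): RMB statistics,
 * buffer size histograms and the scalar per-technology counters.
 */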
static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
				       struct smc_stats *stats, int tech)
{
	struct smc_stats_tech *smc_tech;
	struct nlattr *attrs;

	smc_tech = &stats->smc[tech];
	if (tech == SMC_TYPE_D)
		attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
	else
		attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);

	if (!attrs)
		goto errout;
	if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
				       SMC_NLA_STATS_T_TX_RMB_STATS))
		goto errattr;
	if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
				       SMC_NLA_STATS_T_RX_RMB_STATS))
		goto errattr;
	if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
					   SMC_NLA_STATS_T_TXPLOAD_SIZE))
		goto errattr;
	if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
					   SMC_NLA_STATS_T_RXPLOAD_SIZE))
		goto errattr;
	if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
					   SMC_NLA_STATS_T_TX_RMB_SIZE))
		goto errattr;
	if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
					   SMC_NLA_STATS_T_RX_RMB_SIZE))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
			      smc_tech->clnt_v1_succ_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
			      smc_tech->clnt_v2_succ_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
			      smc_tech->srv_v1_succ_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
			      smc_tech->srv_v2_succ_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
			      smc_tech->rx_bytes,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
			      smc_tech->tx_bytes,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
			      smc_tech->rx_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
			      smc_tech->tx_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
			      smc_tech->sendpage_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
			      smc_tech->cork_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
			      smc_tech->ndly_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
			      smc_tech->splice_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
			      smc_tech->urg_data_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;

	nla_nest_end(skb, attrs);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}
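
/* Netlink dump handler for SMC_NETLINK_GET_STATS: sum the per-CPU counters
 * into a temporary struct smc_stats and emit them as a single SMC_GEN_STATS
 * message; cb_ctx->pos[0] marks the dump as already done.
 */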
int smc_nl_get_stats(struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct smc_stats *stats;
	struct nlattr *attrs;
	int cpu, i, size;
	void *nlh;
	u64 *src;
	u64 *sum;

	if (cb_ctx->pos[0])
		goto errmsg;
	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_STATS);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_STATS);
	if (!attrs)
		goto errnest;
	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		goto erralloc;
	size = sizeof(*stats) / sizeof(u64);
	for_each_possible_cpu(cpu) {
		src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
		sum = (u64 *)stats;
		for (i = 0; i < size; i++)
			*(sum++) += *(src++);
	}
	if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
		goto errattr;
	if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
			      stats->clnt_hshake_err_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
			      stats->srv_hshake_err_cnt,
			      SMC_NLA_STATS_PAD))
		goto errattr;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	cb_ctx->pos[0] = 1;
	kfree(stats);
	return skb->len;

errattr:
	kfree(stats);
erralloc:
	nla_nest_cancel(skb, attrs);
errnest:
	genlmsg_cancel(skb, nlh);
errmsg:
	return skb->len;
}
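
/* Emit one fallback reason entry (server or client array slot 'pos') as an
 * SMC_GEN_FBACK_STATS message. The overall fallback counts are included
 * only once per dump, tracked via cnt_reported. Returns -ENODATA for an
 * unused slot and -EMSGSIZE if the message does not fit.
 */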
static int smc_nl_get_fback_details(struct sk_buff *skb,
				    struct netlink_callback *cb, int pos,
				    bool is_srv)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct net *net = sock_net(skb->sk);
	int cnt_reported = cb_ctx->pos[2];
	struct smc_stats_fback *trgt_arr;
	struct nlattr *attrs;
	int rc = 0;
	void *nlh;

	if (is_srv)
		trgt_arr = &net->smc.fback_rsn->srv[0];
	else
		trgt_arr = &net->smc.fback_rsn->clnt[0];
	if (!trgt_arr[pos].fback_code)
		return -ENODATA;
	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_FBACK_STATS);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
	if (!attrs)
		goto errout;
	if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
		goto errattr;
	if (!cnt_reported) {
		if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
				      net->smc.fback_rsn->srv_fback_cnt,
				      SMC_NLA_FBACK_STATS_PAD))
			goto errattr;
		if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
				      net->smc.fback_rsn->clnt_fback_cnt,
				      SMC_NLA_FBACK_STATS_PAD))
			goto errattr;
		cnt_reported = 1;
	}

	if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
			trgt_arr[pos].fback_code))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
			trgt_arr[pos].count))
		goto errattr;

	cb_ctx->pos[2] = cnt_reported;
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return rc;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}
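
/* Netlink dump handler for SMC_NETLINK_GET_FBACK_STATS: walk the server and
 * client fallback reason arrays under mutex_fback_rsn and store the resume
 * position in cb_ctx->pos[] so an interrupted dump can continue.
 */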
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct net *net = sock_net(skb->sk);
	int rc_srv = 0, rc_clnt = 0, k;
	int skip_serv = cb_ctx->pos[1];
	int snum = cb_ctx->pos[0];
	bool is_srv = true;

	mutex_lock(&net->smc.mutex_fback_rsn);
	for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
		if (k < snum)
			continue;
		if (!skip_serv) {
			rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
			if (rc_srv && rc_srv != -ENODATA)
				break;
		} else {
			skip_serv = 0;
		}
		rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
		if (rc_clnt && rc_clnt != -ENODATA) {
			skip_serv = 1;
			break;
		}
		if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
			break;
	}
	mutex_unlock(&net->smc.mutex_fback_rsn);
	cb_ctx->pos[1] = skip_serv;
	cb_ctx->pos[0] = k;
	return skb->len;
}