/* net/ethtool/rings.c - ethtool netlink RINGS_GET / RINGS_SET handlers */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include "netlink.h"
  3. #include "common.h"
/* Request info for RINGS_GET: carries only the common ethnl header. */
struct rings_req_info {
	struct ethnl_req_info base;
};
/* Reply data for RINGS_GET: ring parameters queried from the driver. */
struct rings_reply_data {
	struct ethnl_reply_data base;
	/* legacy rx/tx ring counts and maxima */
	struct ethtool_ringparam ringparam;
	/* extended parameters (rx buf len, tcp data split, cqe size, tx push) */
	struct kernel_ethtool_ringparam kernel_ringparam;
};
/* Map an embedded ethnl_reply_data back to its rings_reply_data container. */
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
/* Attribute policy for RINGS_GET requests: only the nested header attr. */
const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy),
};
  18. static int rings_prepare_data(const struct ethnl_req_info *req_base,
  19. struct ethnl_reply_data *reply_base,
  20. struct genl_info *info)
  21. {
  22. struct rings_reply_data *data = RINGS_REPDATA(reply_base);
  23. struct netlink_ext_ack *extack = info ? info->extack : NULL;
  24. struct net_device *dev = reply_base->dev;
  25. int ret;
  26. if (!dev->ethtool_ops->get_ringparam)
  27. return -EOPNOTSUPP;
  28. ret = ethnl_ops_begin(dev);
  29. if (ret < 0)
  30. return ret;
  31. dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
  32. &data->kernel_ringparam, extack);
  33. ethnl_ops_complete(dev);
  34. return 0;
  35. }
  36. static int rings_reply_size(const struct ethnl_req_info *req_base,
  37. const struct ethnl_reply_data *reply_base)
  38. {
  39. return nla_total_size(sizeof(u32)) + /* _RINGS_RX_MAX */
  40. nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI_MAX */
  41. nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */
  42. nla_total_size(sizeof(u32)) + /* _RINGS_TX_MAX */
  43. nla_total_size(sizeof(u32)) + /* _RINGS_RX */
  44. nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */
  45. nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */
  46. nla_total_size(sizeof(u32)) + /* _RINGS_TX */
  47. nla_total_size(sizeof(u32)) + /* _RINGS_RX_BUF_LEN */
  48. nla_total_size(sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */
  49. nla_total_size(sizeof(u32) + /* _RINGS_CQE_SIZE */
  50. nla_total_size(sizeof(u8))); /* _RINGS_TX_PUSH */
  51. }
  52. static int rings_fill_reply(struct sk_buff *skb,
  53. const struct ethnl_req_info *req_base,
  54. const struct ethnl_reply_data *reply_base)
  55. {
  56. const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
  57. const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
  58. const struct ethtool_ringparam *ringparam = &data->ringparam;
  59. WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);
  60. if ((ringparam->rx_max_pending &&
  61. (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
  62. ringparam->rx_max_pending) ||
  63. nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
  64. ringparam->rx_pending))) ||
  65. (ringparam->rx_mini_max_pending &&
  66. (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
  67. ringparam->rx_mini_max_pending) ||
  68. nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
  69. ringparam->rx_mini_pending))) ||
  70. (ringparam->rx_jumbo_max_pending &&
  71. (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
  72. ringparam->rx_jumbo_max_pending) ||
  73. nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
  74. ringparam->rx_jumbo_pending))) ||
  75. (ringparam->tx_max_pending &&
  76. (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
  77. ringparam->tx_max_pending) ||
  78. nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
  79. ringparam->tx_pending))) ||
  80. (kr->rx_buf_len &&
  81. (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
  82. (kr->tcp_data_split &&
  83. (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
  84. kr->tcp_data_split))) ||
  85. (kr->cqe_size &&
  86. (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
  87. nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
  88. return -EMSGSIZE;
  89. return 0;
  90. }
/* Glue for the generic ethnl GET machinery: ETHTOOL_MSG_RINGS_GET. */
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd = ETHTOOL_MSG_RINGS_GET,
	.reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr = ETHTOOL_A_RINGS_HEADER,
	.req_info_size = sizeof(struct rings_req_info),
	.reply_data_size = sizeof(struct rings_reply_data),
	.prepare_data = rings_prepare_data,
	.reply_size = rings_reply_size,
	.fill_reply = rings_fill_reply,
};
  101. /* RINGS_SET */
/* Attribute policy for RINGS_SET requests. */
const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX] = { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
	/* zero is rejected at the policy level for these two */
	[ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_CQE_SIZE] = NLA_POLICY_MIN(NLA_U32, 1),
	/* boolean: 0 or 1 */
	[ETHTOOL_A_RINGS_TX_PUSH] = NLA_POLICY_MAX(NLA_U8, 1),
};
  113. int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
  114. {
  115. struct kernel_ethtool_ringparam kernel_ringparam = {};
  116. struct ethtool_ringparam ringparam = {};
  117. struct ethnl_req_info req_info = {};
  118. struct nlattr **tb = info->attrs;
  119. const struct nlattr *err_attr;
  120. const struct ethtool_ops *ops;
  121. struct net_device *dev;
  122. bool mod = false;
  123. int ret;
  124. ret = ethnl_parse_header_dev_get(&req_info,
  125. tb[ETHTOOL_A_RINGS_HEADER],
  126. genl_info_net(info), info->extack,
  127. true);
  128. if (ret < 0)
  129. return ret;
  130. dev = req_info.dev;
  131. ops = dev->ethtool_ops;
  132. ret = -EOPNOTSUPP;
  133. if (!ops->get_ringparam || !ops->set_ringparam)
  134. goto out_dev;
  135. if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
  136. !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
  137. ret = -EOPNOTSUPP;
  138. NL_SET_ERR_MSG_ATTR(info->extack,
  139. tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
  140. "setting rx buf len not supported");
  141. goto out_dev;
  142. }
  143. if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
  144. !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
  145. ret = -EOPNOTSUPP;
  146. NL_SET_ERR_MSG_ATTR(info->extack,
  147. tb[ETHTOOL_A_RINGS_CQE_SIZE],
  148. "setting cqe size not supported");
  149. goto out_dev;
  150. }
  151. if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
  152. !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
  153. ret = -EOPNOTSUPP;
  154. NL_SET_ERR_MSG_ATTR(info->extack,
  155. tb[ETHTOOL_A_RINGS_TX_PUSH],
  156. "setting tx push not supported");
  157. goto out_dev;
  158. }
  159. rtnl_lock();
  160. ret = ethnl_ops_begin(dev);
  161. if (ret < 0)
  162. goto out_rtnl;
  163. ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack);
  164. ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
  165. ethnl_update_u32(&ringparam.rx_mini_pending,
  166. tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
  167. ethnl_update_u32(&ringparam.rx_jumbo_pending,
  168. tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
  169. ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
  170. ethnl_update_u32(&kernel_ringparam.rx_buf_len,
  171. tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
  172. ethnl_update_u32(&kernel_ringparam.cqe_size,
  173. tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
  174. ethnl_update_u8(&kernel_ringparam.tx_push,
  175. tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
  176. ret = 0;
  177. if (!mod)
  178. goto out_ops;
  179. /* ensure new ring parameters are within limits */
  180. if (ringparam.rx_pending > ringparam.rx_max_pending)
  181. err_attr = tb[ETHTOOL_A_RINGS_RX];
  182. else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
  183. err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
  184. else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
  185. err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
  186. else if (ringparam.tx_pending > ringparam.tx_max_pending)
  187. err_attr = tb[ETHTOOL_A_RINGS_TX];
  188. else
  189. err_attr = NULL;
  190. if (err_attr) {
  191. ret = -EINVAL;
  192. NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
  193. "requested ring size exceeds maximum");
  194. goto out_ops;
  195. }
  196. ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
  197. &kernel_ringparam, info->extack);
  198. if (ret < 0)
  199. goto out_ops;
  200. ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
  201. out_ops:
  202. ethnl_ops_complete(dev);
  203. out_rtnl:
  204. rtnl_unlock();
  205. out_dev:
  206. ethnl_parse_header_dev_put(&req_info);
  207. return ret;
  208. }