rmnet_config.c

/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET configuration engine
 */

#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
#include "rmnet_genl.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"

#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1

#define QMAP_SHS_MASK 0xFF
#define QMAP_SHS_PKT_LIMIT 200

/* Locking scheme -
 * The shared resource which needs to be protected is realdev->rx_handler_data.
 * For the writer path, this is protected by rtnl_lock(). The writer paths are
 * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
 * paths are already called with rtnl_lock() acquired. There is also an
 * ASSERT_RTNL() to ensure that we are calling with rtnl held. For
 * dereferencing here, we will need to use rtnl_dereference(). Dev list writing
 * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
 * For the reader path, real_dev->rx_handler_data is accessed in the TX / RX
 * path. We only need rcu_read_lock() for these scenarios. In these cases,
 * the rcu_read_lock() is held in __dev_queue_xmit() and
 * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
 * to get the relevant information. For dev list reading, we again acquire
 * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
 * We also use unregister_netdevice_many() to free all rmnet devices in
 * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
 * in the same context.
 */
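
/* Illustrative sketch of the two access patterns described above (not part
 * of the driver logic; shown only to make the locking comment concrete):
 *
 *	Writer (configuration path, rtnl held):
 *		ASSERT_RTNL();
 *		port = rtnl_dereference(real_dev->rx_handler_data);
 *
 *	Reader (TX/RX datapath, inside an RCU read-side section):
 *		port = rcu_dereference_rtnl(real_dev->rx_handler_data);
 */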

/* Local Definitions and Declarations */

enum {
	IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX,
	IFLA_RMNET_UL_AGG_PARAMS,
	__IFLA_RMNET_EXT_MAX,
};

static const struct nla_policy rmnet_policy[__IFLA_RMNET_EXT_MAX] = {
	[IFLA_RMNET_MUX_ID] = {
		.type = NLA_U16
	},
	[IFLA_RMNET_FLAGS] = {
		.len = sizeof(struct ifla_rmnet_flags)
	},
	[IFLA_RMNET_DFC_QOS] = {
		.len = sizeof(struct tcmsg)
	},
	[IFLA_RMNET_UL_AGG_PARAMS] = {
		.len = sizeof(struct rmnet_egress_agg_params)
	},
};

int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
EXPORT_SYMBOL(rmnet_is_real_dev_registered);

/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
	return rtnl_dereference(real_dev->rx_handler_data);
}
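
/* Tears down the binding between an rmnet port and its physical device:
 * unregisters the rx handler, frees the per-port state and drops the
 * reference taken in rmnet_register_real_device(). Refuses to run while
 * rmnet devices are still attached. Caller must hold rtnl.
 */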
static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	netdev_rx_handler_unregister(real_dev);

	rmnet_map_cmd_exit(port);
	rmnet_map_tx_aggregate_exit(port);

	rmnet_descriptor_deinit(port);

	kfree(port);

	/* release reference on real_dev */
	dev_put(real_dev);

	netdev_dbg(real_dev, "Removed from rmnet\n");
	return 0;
}
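
/* Attaches rmnet to a physical device the first time a vnd is created on
 * top of it: allocates the per-port state, registers the rx handler and
 * takes a reference on the device. Idempotent if already registered.
 */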
static int rmnet_register_real_device(struct net_device *real_dev)
{
	struct rmnet_port *port;
	int rc, entry;

	ASSERT_RTNL();

	if (rmnet_is_real_dev_registered(real_dev))
		return 0;

	port = kzalloc(sizeof(*port), GFP_ATOMIC);
	if (!port)
		return -ENOMEM;

	port->dev = real_dev;
	port->phy_shs_cfg.config = RMNET_SHS_NO_DLMKR | RMNET_SHS_NO_PSH |
				   RMNET_SHS_STMP_ALL;
	port->phy_shs_cfg.map_mask = QMAP_SHS_MASK;
	port->phy_shs_cfg.max_pkts = QMAP_SHS_PKT_LIMIT;

	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	if (rc) {
		kfree(port);
		return -EBUSY;
	}

	/* hold on to real dev for MAP data */
	dev_hold(real_dev);

	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
		INIT_HLIST_HEAD(&port->muxed_ep[entry]);

	rc = rmnet_descriptor_init(port);
	if (rc) {
		/* unwind the registration so the rx handler, the device
		 * reference and the port are not leaked on failure
		 */
		rmnet_descriptor_deinit(port);
		netdev_rx_handler_unregister(real_dev);
		dev_put(real_dev);
		kfree(port);
		return rc;
	}

	rmnet_map_tx_aggregate_init(port);
	rmnet_map_cmd_init(port);

	netdev_dbg(real_dev, "registered with rmnet\n");
	return 0;
}
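
/* Undoes bridge mode on teardown. If the port being removed carries no
 * rmnet devs it is the bridge slave, so the master is reset to VND mode;
 * otherwise it is the master and the slave device is unregistered.
 */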
static void rmnet_unregister_bridge(struct net_device *dev,
				    struct rmnet_port *port)
{
	struct rmnet_port *bridge_port;
	struct net_device *bridge_dev;

	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
		return;

	/* bridge slave handling */
	if (!port->nr_rmnet_devs) {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		bridge_port->bridge_ep = NULL;
		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
	} else {
		bridge_dev = port->bridge_ep;

		bridge_port = rmnet_get_port_rtnl(bridge_dev);
		rmnet_unregister_real_device(bridge_dev, bridge_port);
	}
}
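
/* rtnl newlink handler: creates an rmnet virtual device on top of the
 * physical device named by IFLA_LINK, registers that device with rmnet on
 * first use, and files the new endpoint under its mux ID.
 */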
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_device *real_dev;
	int mode = RMNET_EPMODE_VND;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u32 data_format;
	int err = 0;
	u16 mux_id;

	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev || !dev)
		return -ENODEV;

	if (!data[IFLA_RMNET_MUX_ID])
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return -ENOMEM;

	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);

	err = rmnet_register_real_device(real_dev);
	if (err)
		goto err0;

	port = rmnet_get_port_rtnl(real_dev);
	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
	if (err)
		goto err1;

	port->rmnet_mode = mode;

	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		data_format = flags->flags & flags->mask;
		netdev_dbg(dev, "data format [0x%08X]\n", data_format);
		port->data_format = data_format;
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		void *agg_params;
		unsigned long irq_flags;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		spin_lock_irqsave(&port->agg_lock, irq_flags);
		memcpy(&port->egress_agg_params, agg_params,
		       sizeof(port->egress_agg_params));
		spin_unlock_irqrestore(&port->agg_lock, irq_flags);
	}

	return 0;

err1:
	rmnet_unregister_real_device(real_dev, port);
err0:
	kfree(ep);
	return err;
}
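
/* rtnl dellink handler: unhashes the endpoint for this vnd, waits for RCU
 * readers to drain, then unregisters the vnd and, once no rmnet devs are
 * left, the underlying physical device as well.
 */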
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u8 mux_id;

	real_dev = priv->real_dev;

	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
		return;

	port = rmnet_get_port_rtnl(real_dev);

	mux_id = rmnet_vnd_get_mux(dev);

	ep = rmnet_get_endpoint(port, mux_id);
	if (ep) {
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		synchronize_rcu();
		kfree(ep);
	}

	if (!port->nr_rmnet_devs)
		qmi_rmnet_qmi_exit(port->qmi_info, port);

	unregister_netdevice(dev);

	qmi_rmnet_qos_exit_post();

	rmnet_unregister_real_device(real_dev, port);
}
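
/* Called when the physical device itself is unregistering: detaches every
 * rmnet dev muxed on top of it in one batch, so the port can be freed
 * without dropping rtnl in between.
 */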
static void rmnet_force_unassociate_device(struct net_device *dev)
{
	struct net_device *real_dev = dev;
	struct hlist_node *tmp_ep;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	unsigned long bkt_ep;
	LIST_HEAD(list);
	HLIST_HEAD(cleanup_list);

	if (!rmnet_is_real_dev_registered(real_dev))
		return;

	ASSERT_RTNL();

	port = rmnet_get_port_rtnl(dev);
	qmi_rmnet_qmi_exit(port->qmi_info, port);

	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
		unregister_netdevice_queue(ep->egress_dev, &list);
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head(&ep->hlnode, &cleanup_list);
	}

	synchronize_rcu();

	hlist_for_each_entry_safe(ep, tmp_ep, &cleanup_list, hlnode) {
		hlist_del(&ep->hlnode);
		kfree(ep);
	}

	/* Unregister the devices in this context, before freeing the port.
	 * If this API ever becomes non-synchronous, the order should be
	 * switched.
	 */
	unregister_netdevice_many(&list);

	qmi_rmnet_qos_exit_post();

	rmnet_unregister_real_device(real_dev, port);
}
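
/* netdevice notifier: forces an unassociate when a physical device is
 * unregistered by the kernel, and resets the MAC address on NETDEV_DOWN.
 */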
static int rmnet_config_notify_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	if (!dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		netdev_dbg(dev, "Kernel unregister\n");
		rmnet_force_unassociate_device(dev);
		break;
	case NETDEV_DOWN:
		rmnet_vnd_reset_mac_addr(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rmnet_dev_notifier __read_mostly = {
	.notifier_call = rmnet_config_notify_cb,
};
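
/* Validates the netlink attributes before newlink/changelink: the mux ID
 * must fit in the logical endpoint table, and the aggregation time must be
 * at least 1000000 (which appears to be a 1 ms floor in nanoseconds; the
 * unit is defined by struct rmnet_egress_agg_params).
 */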
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct rmnet_egress_agg_params *agg_params;
	u16 mux_id;

	if (!data)
		return -EINVAL;

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
		if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
			return -ERANGE;
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		if (agg_params->agg_time < 1000000)
			return -EINVAL;
	}

	return 0;
}
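
/* rtnl changelink handler: rehashes the endpoint when the mux ID changes
 * and applies updated data-format flags, DFC QoS settings and uplink
 * aggregation parameters.
 */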
static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct rmnet_endpoint *ep;
	struct rmnet_port *port;
	u16 mux_id;

	real_dev = __dev_get_by_index(dev_net(dev),
				      nla_get_u32(tb[IFLA_LINK]));

	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
		return -ENODEV;

	port = rmnet_get_port_rtnl(real_dev);

	if (data[IFLA_RMNET_MUX_ID]) {
		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);

		ep = rmnet_get_endpoint(port, priv->mux_id);
		if (!ep)
			return -ENODEV;

		hlist_del_init_rcu(&ep->hlnode);
		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);

		ep->mux_id = mux_id;
		priv->mux_id = mux_id;
	}

	if (data[IFLA_RMNET_FLAGS]) {
		struct ifla_rmnet_flags *flags;

		flags = nla_data(data[IFLA_RMNET_FLAGS]);
		port->data_format = flags->flags & flags->mask;
	}

	if (data[IFLA_RMNET_DFC_QOS]) {
		struct tcmsg *tcm;

		tcm = nla_data(data[IFLA_RMNET_DFC_QOS]);
		qmi_rmnet_change_link(dev, port, tcm);
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		struct rmnet_egress_agg_params *agg_params;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		rmnet_map_update_ul_agg_config(port, agg_params->agg_size,
					       agg_params->agg_count,
					       agg_params->agg_features,
					       agg_params->agg_time);
	}

	return 0;
}

static size_t rmnet_get_size(const struct net_device *dev)
{
	return
		/* IFLA_RMNET_MUX_ID */
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags)) +
		/* IFLA_RMNET_DFC_QOS */
		nla_total_size(sizeof(struct tcmsg)) +
		/* IFLA_RMNET_UL_AGG_PARAMS */
		nla_total_size(sizeof(struct rmnet_egress_agg_params));
}
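
/* Fills the netlink response for this vnd: mux ID, data-format flags and,
 * when the physical device is still registered, the current uplink
 * aggregation parameters.
 */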
static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct ifla_rmnet_flags f;
	struct rmnet_port *port = NULL;

	real_dev = priv->real_dev;

	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
		goto nla_put_failure;

	if (rmnet_is_real_dev_registered(real_dev)) {
		port = rmnet_get_port_rtnl(real_dev);
		f.flags = port->data_format;
	} else {
		f.flags = 0;
	}

	f.mask = ~0;

	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
		goto nla_put_failure;

	if (port) {
		if (nla_put(skb, IFLA_RMNET_UL_AGG_PARAMS,
			    sizeof(port->egress_agg_params),
			    &port->egress_agg_params))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= __IFLA_RMNET_EXT_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
	.newlink	= rmnet_newlink,
	.dellink	= rmnet_dellink,
	.get_size	= rmnet_get_size,
	.changelink	= rmnet_changelink,
	.policy		= rmnet_policy,
	.fill_info	= rmnet_fill_info,
};

/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
	if (rmnet_is_real_dev_registered(real_dev))
		return rcu_dereference_rtnl(real_dev->rx_handler_data);
	else
		return NULL;
}
EXPORT_SYMBOL(rmnet_get_port);

struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
		if (ep->mux_id == mux_id)
			return ep;
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_endpoint);
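
/* Puts the port and a slave device into bridge mode so traffic is passed
 * through between them. Only valid while at most one rmnet dev is attached;
 * with more than one, the port is assumed to be in use for muxing.
 */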
int rmnet_add_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev,
		     struct netlink_ext_ack *extack)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;
	int err;

	port = rmnet_get_port(real_dev);

	/* If there is more than one rmnet dev attached, it's probably being
	 * used for muxing. Skip the bridging in that case.
	 */
	if (port->nr_rmnet_devs > 1)
		return -EINVAL;

	if (rmnet_is_real_dev_registered(slave_dev))
		return -EBUSY;

	err = rmnet_register_real_device(slave_dev);
	if (err)
		return -EBUSY;

	slave_port = rmnet_get_port(slave_dev);
	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	slave_port->bridge_ep = real_dev;

	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
	port->bridge_ep = slave_dev;

	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
	return 0;
}

int rmnet_del_bridge(struct net_device *rmnet_dev,
		     struct net_device *slave_dev)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	struct net_device *real_dev = priv->real_dev;
	struct rmnet_port *port, *slave_port;

	port = rmnet_get_port(real_dev);
	port->rmnet_mode = RMNET_EPMODE_VND;
	port->bridge_ep = NULL;

	slave_port = rmnet_get_port(slave_dev);
	rmnet_unregister_real_device(slave_dev, slave_port);

	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
	return 0;
}
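
/* Accessors exported for the QMI / DFC layer; the void *port arguments are
 * opaque struct rmnet_port pointers.
 */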
void *rmnet_get_qmi_pt(void *port)
{
	if (port)
		return ((struct rmnet_port *)port)->qmi_info;

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_qmi_pt);

void *rmnet_get_qos_pt(struct net_device *dev)
{
	struct rmnet_priv *priv;

	if (dev) {
		priv = netdev_priv(dev);
		return rcu_dereference(priv->qos_info);
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_qos_pt);

void *rmnet_get_rmnet_port(struct net_device *dev)
{
	struct rmnet_priv *priv;

	if (dev) {
		priv = netdev_priv(dev);
		return (void *)rmnet_get_port(priv->real_dev);
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_port);

struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	if (port) {
		ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
		if (ep)
			return ep->egress_dev;
	}

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_dev);

void rmnet_reset_qmi_pt(void *port)
{
	if (port)
		((struct rmnet_port *)port)->qmi_info = NULL;
}
EXPORT_SYMBOL(rmnet_reset_qmi_pt);

void rmnet_init_qmi_pt(void *port, void *qmi)
{
	if (port)
		((struct rmnet_port *)port)->qmi_info = qmi;
}
EXPORT_SYMBOL(rmnet_init_qmi_pt);
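
/* Sums TX / RX packet counters across every endpoint on the port and every
 * CPU, using the u64_stats sequence counter for consistent 64-bit reads.
 */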
void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
	struct rmnet_priv *priv;
	struct rmnet_pcpu_stats *ps;
	unsigned int cpu, start;
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (!port || !tx || !rx)
		return;

	*tx = 0;
	*rx = 0;

	rcu_read_lock();
	hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
		priv = netdev_priv(ep->egress_dev);
		for_each_possible_cpu(cpu) {
			ps = per_cpu_ptr(priv->pcpu_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&ps->syncp);
				*tx += ps->stats.tx_pkts;
				*rx += ps->stats.rx_pkts;
			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_get_packets);

void rmnet_set_powersave_format(void *port)
{
	if (!port)
		return;

	((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_set_powersave_format);

void rmnet_clear_powersave_format(void *port)
{
	if (!port)
		return;

	((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_clear_powersave_format);
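
/* Walks every endpoint on the port under RCU: rmnet_enable_all_flows()
 * re-enables flow control on each vnd, while rmnet_all_flows_enabled()
 * reports whether any vnd still has a disabled flow.
 */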
void rmnet_enable_all_flows(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (unlikely(!port))
		return;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		qmi_rmnet_enable_all_flows(ep->egress_dev);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_enable_all_flows);

bool rmnet_all_flows_enabled(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;
	bool ret = true;

	if (unlikely(!port))
		return true;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
			ret = false;
			goto out;
		}
	}
out:
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);

int rmnet_get_powersave_notif(void *port)
{
	if (!port)
		return 0;

	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);

struct net_device *rmnet_get_real_dev(void *port)
{
	if (port)
		return ((struct rmnet_port *)port)->dev;

	return NULL;
}
EXPORT_SYMBOL(rmnet_get_real_dev);

int rmnet_get_dlmarker_info(void *port)
{
	if (!port)
		return 0;

	return ((struct rmnet_port *)port)->data_format &
		(RMNET_INGRESS_FORMAT_DL_MARKER_V1 |
		 RMNET_INGRESS_FORMAT_DL_MARKER_V2);
}
EXPORT_SYMBOL(rmnet_get_dlmarker_info);

/* Startup/Shutdown */

static int __init rmnet_init(void)
{
	int rc;

	rc = register_netdevice_notifier(&rmnet_dev_notifier);
	if (rc != 0)
		return rc;

	rc = rtnl_link_register(&rmnet_link_ops);
	if (rc != 0) {
		unregister_netdevice_notifier(&rmnet_dev_notifier);
		return rc;
	}

	rmnet_core_genl_init();

	try_module_get(THIS_MODULE);
	return rc;
}

static void __exit rmnet_exit(void)
{
	unregister_netdevice_notifier(&rmnet_dev_notifier);
	rtnl_link_unregister(&rmnet_link_ops);
	rmnet_core_genl_deinit();

	module_put(THIS_MODULE);
}

module_init(rmnet_init);
module_exit(rmnet_exit);

MODULE_LICENSE("GPL v2");