mhi_netdev.c

// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/msm_rmnet.h>
#include <linux/if_arp.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/ipc_logging.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/rtnetlink.h>
#include <linux/kthread.h>
#include <linux/mhi.h>
#include <linux/mhi_misc.h>

#define MHI_NETDEV_DRIVER_NAME "mhi_netdev"
#define WATCHDOG_TIMEOUT (30 * HZ)
#define IPC_LOG_PAGES (100)
#define MAX_NETBUF_SIZE (128)
#define MHI_NETDEV_NAPI_POLL_WEIGHT (64)

#ifdef CONFIG_MHI_BUS_DEBUG
#define MHI_NETDEV_LOG_LVL MHI_MSG_LVL_VERBOSE
#else
#define MHI_NETDEV_LOG_LVL MHI_MSG_LVL_ERROR
#endif

#define MSG_VERB(fmt, ...) do { \
	if (mhi_netdev->ipc_log && mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \
		ipc_log_string(mhi_netdev->ipc_log, "%s[D][%s] " fmt, \
			       "", __func__, ##__VA_ARGS__); \
} while (0)

#define MSG_LOG(fmt, ...) do { \
	if (mhi_netdev->ipc_log && mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \
		ipc_log_string(mhi_netdev->ipc_log, "%s[I][%s] " fmt, \
			       "", __func__, ##__VA_ARGS__); \
} while (0)

#define MSG_ERR(fmt, ...) do { \
	pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
	if (mhi_netdev->ipc_log && mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \
		ipc_log_string(mhi_netdev->ipc_log, "%s[E][%s] " fmt, \
			       "", __func__, ##__VA_ARGS__); \
} while (0)

static const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
	[MHI_MSG_LVL_VERBOSE] = "Verbose",
	[MHI_MSG_LVL_INFO] = "Info",
	[MHI_MSG_LVL_ERROR] = "Error",
	[MHI_MSG_LVL_CRITICAL] = "Critical",
	[MHI_MSG_LVL_MASK_ALL] = "Mask all",
};

#define MHI_NETDEV_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \
					  !mhi_log_level_str[level]) ? \
					 "Mask all" : mhi_log_level_str[level])

struct mhi_net_chain {
	struct sk_buff *head, *tail; /* chained skb */
};

struct mhi_netdev {
	struct mhi_device *mhi_dev;
	struct mhi_netdev *rsc_dev; /* rsc linked node */
	struct mhi_netdev *rsc_parent;
	bool is_rsc_dev;
	int wake;

	u32 mru;
	u32 order;
	const char *interface_name;
	struct napi_struct *napi;
	struct net_device *ndev;

	struct list_head *recycle_pool;
	int pool_size;
	bool chain_skb;
	struct mhi_net_chain *chain;

	struct task_struct *alloc_task;
	wait_queue_head_t alloc_event;
	int bg_pool_limit; /* minimum pool size */
	int bg_pool_size; /* current size of the pool */
	struct list_head *bg_pool;
	spinlock_t bg_lock; /* lock to access list */

	struct dentry *dentry;
	enum MHI_DEBUG_LEVEL msg_lvl;
	void *ipc_log;

	/* debug stats */
	u32 abuffers, kbuffers, rbuffers;
	bool napi_scheduled;
};

struct mhi_netdev_priv {
	struct mhi_netdev *mhi_netdev;
};

/* Try not to make this structure bigger than 128 bytes, since it takes
 * space in the payload packet.
 * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf)
 */
struct mhi_netbuf {
	struct mhi_buf mhi_buf; /* this must be the first element */
	bool recycle;
	struct page *page;
	struct list_head node;
	void (*unmap)(struct device *dev, dma_addr_t addr, size_t size,
		      enum dma_data_direction dir);
};

struct mhi_netdev_driver_data {
	u32 mru;
	bool chain_skb;
	bool is_rsc_chan;
	bool has_rsc_child;
	const char *interface_name;
};

static struct mhi_netdev *rsc_parent_netdev;
static struct mhi_driver mhi_netdev_driver;
static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev);

static __be16 mhi_netdev_ip_type_trans(u8 data)
{
	__be16 protocol = htons(ETH_P_MAP);

	/* determine L3 protocol */
	switch (data & 0xf0) {
	case 0x40:
		/* length must be at least 5 to support a 20 byte IP header */
		if ((data & 0x0f) > 4)
			protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		/* default is already QMAP */
		break;
	}

	return protocol;
}

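/* Allocate a physically contiguous buffer of (PAGE_SIZE << order) bytes and
 * carve the struct mhi_netbuf bookkeeping area out of its tail, so mhi_buf
 * describes only the usable payload portion of the page. When @dev is NULL
 * the page is left unmapped so the caller can map it later (see the
 * background allocator thread below).
 */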
static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
					   gfp_t gfp,
					   unsigned int order)
{
	struct page *page;
	struct mhi_netbuf *netbuf;
	struct mhi_buf *mhi_buf;
	void *vaddr;

	page = __dev_alloc_pages(gfp | __GFP_NOMEMALLOC, order);
	if (!page)
		return NULL;

	vaddr = page_address(page);

	/* the end of the page is used to store the netbuf bookkeeping data */
	netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
	netbuf->recycle = false;
	netbuf->page = page;
	mhi_buf = (struct mhi_buf *)netbuf;
	mhi_buf->buf = vaddr;
	mhi_buf->len = (void *)netbuf - vaddr;

	if (!dev)
		return netbuf;

	mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mhi_buf->dma_addr)) {
		__free_pages(netbuf->page, order);
		return NULL;
	}

	return netbuf;
}

static void mhi_netdev_unmap_page(struct device *dev,
				  dma_addr_t dma_addr,
				  size_t len,
				  enum dma_data_direction dir)
{
	dma_unmap_page(dev, dma_addr, len, dir);
}

static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev,
				struct mhi_device *mhi_dev,
				int nr_tre)
{
	struct device *dev = mhi_dev->dev.parent->parent;
	const u32 order = mhi_netdev->order;
	int i, ret;

	for (i = 0; i < nr_tre; i++) {
		struct mhi_buf *mhi_buf;
		struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC,
							     order);
		if (!netbuf)
			return -ENOMEM;

		mhi_buf = (struct mhi_buf *)netbuf;
		netbuf->unmap = mhi_netdev_unmap_page;

		ret = mhi_queue_dma(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
				    mhi_buf->len, MHI_EOT);
		if (unlikely(ret)) {
			MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
			mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
					      mhi_buf->len, DMA_FROM_DEVICE);
			__free_pages(netbuf->page, order);
			return ret;
		}
		mhi_netdev->abuffers++;
	}

	return 0;
}

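/* Hand pre-allocated buffers from the background pool to the hardware. The
 * pool is spliced onto a local list under bg_lock so buffers can be mapped
 * and queued without holding the lock; whatever was not consumed is spliced
 * back afterwards. Returns the number of buffers actually queued.
 */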
static int mhi_netdev_queue_bg_pool(struct mhi_netdev *mhi_netdev,
				    struct mhi_device *mhi_dev,
				    int nr_tre)
{
	struct device *dev = mhi_dev->dev.parent->parent;
	int i, ret;
	LIST_HEAD(head);

	spin_lock_bh(&mhi_netdev->bg_lock);
	list_splice_init(mhi_netdev->bg_pool, &head);
	spin_unlock_bh(&mhi_netdev->bg_lock);

	for (i = 0; i < nr_tre; i++) {
		struct mhi_netbuf *net_buf =
			list_first_entry_or_null(&head, struct mhi_netbuf, node);
		struct mhi_buf *mhi_buf = (struct mhi_buf *)net_buf;

		if (!mhi_buf)
			break;

		mhi_buf->dma_addr = dma_map_page(dev, net_buf->page, 0,
						 mhi_buf->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mhi_buf->dma_addr))
			break;

		net_buf->unmap = mhi_netdev_unmap_page;
		ret = mhi_queue_dma(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
				    mhi_buf->len, MHI_EOT);
		if (unlikely(ret)) {
			MSG_ERR("Failed to queue transfer, ret: %d\n", ret);
			mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
					      mhi_buf->len, DMA_FROM_DEVICE);
			break;
		}
		list_del(&net_buf->node);
		mhi_netdev->kbuffers++;
	}

	/* add the remaining buffers back to the main pool */
	spin_lock_bh(&mhi_netdev->bg_lock);
	list_splice(&head, mhi_netdev->bg_pool);
	mhi_netdev->bg_pool_size -= i;
	spin_unlock_bh(&mhi_netdev->bg_lock);

	/* wake up the bg thread to allocate more buffers */
	wake_up_interruptible(&mhi_netdev->alloc_event);

	return i;
}

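/* Refill the DL ring: first try to reclaim idle buffers from the recycle
 * pool, then fall back to the background pool, and finally to temporary
 * GFP_ATOMIC allocations if descriptors are still left unfilled.
 */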
static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev,
			     struct mhi_device *mhi_dev)
{
	struct device *dev = mhi_dev->dev.parent->parent;
	struct mhi_netbuf *netbuf, *temp_buf;
	struct mhi_buf *mhi_buf;
	struct list_head *pool = mhi_netdev->recycle_pool;
	int nr_tre = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
	int i, ret;
	const int max_peek = 4;

	MSG_VERB("Enter free descriptors: %d\n", nr_tre);

	if (!nr_tre)
		return;

	/* try going through the reclaim pool first */
	for (i = 0; i < nr_tre; i++) {
		/* peek for the next buffer; we peek several times and give
		 * up if no buffer has been freed back to us yet
		 */
		int peek = 0;

		netbuf = NULL;
		list_for_each_entry(temp_buf, pool, node) {
			mhi_buf = (struct mhi_buf *)temp_buf;

			/* page ref count == 1 means idle, buffer is free to
			 * reclaim
			 */
			if (page_ref_count(temp_buf->page) == 1) {
				netbuf = temp_buf;
				break;
			}

			if (peek++ >= max_peek)
				break;
		}

		/* could not find a free buffer */
		if (!netbuf)
			break;

		/* increment the reference count so the buffer won't be freed
		 * when the network stack is done with it
		 */
		page_ref_inc(temp_buf->page);
		list_del(&temp_buf->node);
		dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len,
					   DMA_FROM_DEVICE);
		ret = mhi_queue_dma(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
				    mhi_buf->len, MHI_EOT);
		if (unlikely(ret)) {
			MSG_ERR("Failed to queue buffer, ret: %d\n", ret);
			netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len,
				      DMA_FROM_DEVICE);
			page_ref_dec(temp_buf->page);
			list_add(&temp_buf->node, pool);
			return;
		}
		mhi_netdev->rbuffers++;
	}

	/* recycling did not fill the ring, buffers are still busy; use the
	 * bg pool
	 */
	if (i < nr_tre)
		i += mhi_netdev_queue_bg_pool(mhi_netdev, mhi_dev, nr_tre - i);

	/* still short of buffers; allocate temporary packets */
	if (i < nr_tre)
		mhi_netdev_tmp_alloc(mhi_netdev, mhi_dev, nr_tre - i);
}

/* allocate the recycle buffer pool */
static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
{
	int i;
	struct mhi_netbuf *netbuf, *tmp;
	struct mhi_buf *mhi_buf;
	const u32 order = mhi_netdev->order;
	struct device *dev = mhi_netdev->mhi_dev->dev.parent->parent;
	struct list_head *pool = kmalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return -ENOMEM;

	INIT_LIST_HEAD(pool);

	for (i = 0; i < mhi_netdev->pool_size; i++) {
		/* allocate paged data */
		netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order);
		if (!netbuf)
			goto error_alloc_page;

		netbuf->unmap = dma_sync_single_for_cpu;
		netbuf->recycle = true;
		mhi_buf = (struct mhi_buf *)netbuf;
		list_add(&netbuf->node, pool);
	}

	mhi_netdev->recycle_pool = pool;

	return 0;

error_alloc_page:
	list_for_each_entry_safe(netbuf, tmp, pool, node) {
		list_del(&netbuf->node);
		mhi_buf = (struct mhi_buf *)netbuf;
		dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
			       DMA_FROM_DEVICE);
		__free_pages(netbuf->page, order);
	}

	kfree(pool);

	return -ENOMEM;
}

static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev)
{
	struct device *dev = mhi_netdev->mhi_dev->dev.parent->parent;
	struct mhi_netbuf *netbuf, *tmp;
	struct mhi_buf *mhi_buf;

	list_for_each_entry_safe(netbuf, tmp, mhi_netdev->recycle_pool, node) {
		list_del(&netbuf->node);
		mhi_buf = (struct mhi_buf *)netbuf;
		dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
			       DMA_FROM_DEVICE);
		__free_pages(netbuf->page, mhi_netdev->order);
	}

	kfree(mhi_netdev->recycle_pool);

	/* free the bg pool */
	list_for_each_entry_safe(netbuf, tmp, mhi_netdev->bg_pool, node) {
		list_del(&netbuf->node);
		__free_pages(netbuf->page, mhi_netdev->order);
		mhi_netdev->bg_pool_size--;
	}

	kfree(mhi_netdev->bg_pool);
}

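/* Background kthread that keeps bg_pool topped up with unmapped pages. It
 * allocates in bulk with GFP_KERNEL, kicks NAPI so the new buffers get
 * queued to hardware, and then sleeps until the pool drops back down to
 * bg_pool_limit or the thread is asked to stop.
 */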
static int mhi_netdev_alloc_thread(void *data)
{
	struct mhi_netdev *mhi_netdev = data;
	struct mhi_netbuf *netbuf, *tmp_buf;
	struct mhi_buf *mhi_buf;
	const u32 order = mhi_netdev->order;
	LIST_HEAD(head);

	while (!kthread_should_stop()) {
		while (mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit) {
			int buffers = 0, i;

			/* do a bulk allocation */
			for (i = 0; i < NAPI_POLL_WEIGHT; i++) {
				if (kthread_should_stop())
					goto exit_alloc;

				netbuf = mhi_netdev_alloc(NULL, GFP_KERNEL,
							  order);
				if (!netbuf)
					continue;

				mhi_buf = (struct mhi_buf *)netbuf;
				list_add(&netbuf->node, &head);
				buffers++;
			}

			/* add the list to the main pool */
			spin_lock_bh(&mhi_netdev->bg_lock);
			list_splice_init(&head, mhi_netdev->bg_pool);
			mhi_netdev->bg_pool_size += buffers;
			spin_unlock_bh(&mhi_netdev->bg_lock);
		}

		/* replenish the ring */
		napi_schedule(mhi_netdev->napi);
		mhi_netdev->napi_scheduled = true;

		/* wait for buffers to run low or for the thread to stop */
		wait_event_interruptible(mhi_netdev->alloc_event,
				kthread_should_stop() ||
				mhi_netdev->bg_pool_size <=
						mhi_netdev->bg_pool_limit);
	}

exit_alloc:
	list_for_each_entry_safe(netbuf, tmp_buf, &head, node) {
		list_del(&netbuf->node);
		__free_pages(netbuf->page, order);
	}

	return 0;
}

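/* NAPI poll handler: drain completed DL transfers via mhi_poll(), push any
 * chained skb to the stack, and requeue receive buffers for this device and
 * its RSC sibling (if one exists).
 */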
static int mhi_netdev_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
	struct mhi_net_chain *chain = mhi_netdev->chain;
	int rx_work = 0;

	MSG_VERB("Enter: %d\n", budget);

	rx_work = mhi_poll(mhi_dev, budget);

	/* chained skb, push it to the stack */
	if (chain && chain->head) {
		netif_receive_skb(chain->head);
		chain->head = NULL;
	}

	if (rx_work < 0) {
		MSG_ERR("Error polling ret: %d\n", rx_work);
		napi_complete(napi);
		mhi_netdev->napi_scheduled = false;
		return 0;
	}

	/* queue new buffers */
	mhi_netdev_queue(mhi_netdev, mhi_dev);

	if (rsc_dev)
		mhi_netdev_queue(mhi_netdev, rsc_dev->mhi_dev);

	/* complete work if the number of packets processed is less than the
	 * allocated budget
	 */
	if (rx_work < budget) {
		napi_complete(napi);
		mhi_netdev->napi_scheduled = false;
	}

	MSG_VERB("Polled: %d\n", rx_work);

	return rx_work;
}

static int mhi_netdev_open(struct net_device *dev)
{
	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;

	MSG_LOG("Opened netdev interface\n");

	/* the tx queue may not necessarily be stopped already,
	 * so stop the queue if the tx path is not enabled
	 */
	if (!mhi_dev->ul_chan)
		netif_stop_queue(dev);
	else
		netif_start_queue(dev);

	return 0;
}

static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || MHI_MAX_MTU < new_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
	netdev_tx_t res = NETDEV_TX_OK;
	int ret;

	MSG_VERB("Entered\n");

	ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
			    MHI_EOT);
	if (ret) {
		MSG_VERB("Failed to queue with reason: %d\n", ret);
		netif_stop_queue(dev);
		res = NETDEV_TX_BUSY;
	}

	MSG_VERB("Exited\n");

	return res;
}

static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
{
	struct rmnet_ioctl_extended_s ext_cmd;
	int rc = 0;
	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;

	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
			    sizeof(struct rmnet_ioctl_extended_s));
	if (rc)
		return rc;

	switch (ext_cmd.extended_ioctl) {
	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
		ext_cmd.u.data = 0;
		break;
	case RMNET_IOCTL_GET_DRIVER_NAME:
		strscpy(ext_cmd.u.if_name, mhi_netdev->interface_name,
			sizeof(ext_cmd.u.if_name));
		break;
	case RMNET_IOCTL_SET_SLEEP_STATE:
		if (ext_cmd.u.data && mhi_netdev->wake) {
			/* Request to enable LPM */
			MSG_VERB("Enable MHI LPM\n");
			mhi_netdev->wake--;
			mhi_device_put(mhi_dev);
		} else if (!ext_cmd.u.data && !mhi_netdev->wake) {
			/* Request to disable LPM */
			MSG_VERB("Disable MHI LPM\n");
			mhi_netdev->wake++;
			mhi_device_get(mhi_dev);
		}
		break;
	default:
		rc = -EINVAL;
		break;
	}

	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
			  sizeof(struct rmnet_ioctl_extended_s));

	return rc;
}

static int mhi_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int rc = 0;
	struct rmnet_ioctl_data_s ioctl_data;

	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_IP: /* set RAWIP protocol */
		break;
	case RMNET_IOCTL_GET_LLP: /* get link protocol state */
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	case RMNET_IOCTL_GET_OPMODE: /* get operation mode */
		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
				 sizeof(struct rmnet_ioctl_data_s)))
			rc = -EFAULT;
		break;
	case RMNET_IOCTL_SET_QOS_ENABLE:
		rc = -EINVAL;
		break;
	case RMNET_IOCTL_SET_QOS_DISABLE:
		rc = 0;
		break;
	case RMNET_IOCTL_OPEN:
	case RMNET_IOCTL_CLOSE:
		/* we just ignore them and return success */
		rc = 0;
		break;
	case RMNET_IOCTL_EXTENDED:
		rc = mhi_netdev_ioctl_extended(dev, ifr);
		break;
	default:
		/* don't fail any IOCTL right now */
		rc = 0;
		break;
	}

	return rc;
}

static const struct net_device_ops mhi_netdev_ops_ip = {
	.ndo_open = mhi_netdev_open,
	.ndo_start_xmit = mhi_netdev_xmit,
	.ndo_do_ioctl = mhi_netdev_ioctl,
	.ndo_change_mtu = mhi_netdev_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = NULL,
};

static void mhi_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &mhi_netdev_ops_ip;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->header_ops = NULL;  /* No header */
	dev->type = ARPHRD_RAWIP;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
	dev->watchdog_timeo = WATCHDOG_TIMEOUT;
}

/* enable the mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */
static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
{
	int ret = 0;
	char ifname[IFNAMSIZ];
	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
	struct mhi_netdev_priv *mhi_netdev_priv;

	snprintf(ifname, sizeof(ifname), "%s%%d", mhi_netdev->interface_name);

	rtnl_lock();
	mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv),
					ifname, NET_NAME_PREDICTABLE,
					mhi_netdev_setup);
	if (!mhi_netdev->ndev) {
		rtnl_unlock();
		return -ENOMEM;
	}

	mhi_netdev->ndev->mtu = mhi_dev->mhi_cntrl->buffer_len;
	SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev);
	mhi_netdev_priv = netdev_priv(mhi_netdev->ndev);
	mhi_netdev_priv->mhi_netdev = mhi_netdev;
	rtnl_unlock();

	mhi_netdev->napi = devm_kzalloc(&mhi_dev->dev,
					sizeof(*mhi_netdev->napi), GFP_KERNEL);
	if (!mhi_netdev->napi) {
		ret = -ENOMEM;
		goto napi_alloc_fail;
	}

	netif_napi_add(mhi_netdev->ndev, mhi_netdev->napi,
		       mhi_netdev_poll, MHI_NETDEV_NAPI_POLL_WEIGHT);
	ret = register_netdev(mhi_netdev->ndev);
	if (ret) {
		MSG_ERR("Network device registration failed\n");
		goto net_dev_reg_fail;
	}

	napi_enable(mhi_netdev->napi);

	MSG_LOG("Exited\n");

	return 0;

net_dev_reg_fail:
	netif_napi_del(mhi_netdev->napi);
napi_alloc_fail:
	free_netdev(mhi_netdev->ndev);
	mhi_netdev->ndev = NULL;

	return ret;
}

static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev,
				  struct mhi_result *mhi_result)
{
	struct mhi_netdev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_result->buf_addr;
	struct net_device *ndev = mhi_netdev->ndev;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev,
				struct mhi_buf *mhi_buf,
				struct mhi_result *mhi_result)
{
	struct sk_buff *skb;
	struct mhi_netbuf *netbuf;

	netbuf = (struct mhi_netbuf *)mhi_buf;
	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb) {
		__free_pages(netbuf->page, mhi_netdev->order);
		return;
	}

	skb_add_rx_frag(skb, 0, netbuf->page, 0,
			mhi_result->bytes_xferd, mhi_netdev->mru);
	skb->dev = mhi_netdev->ndev;
	skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf);
	netif_receive_skb(skb);
}

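/* DL transfer completion callback. The buffer is unmapped (or synced back to
 * the CPU for recycled buffers) and returned to the recycle pool when
 * eligible, and its page is attached to an skb that is either handed straight
 * to the stack or appended to the current skb chain for the next napi_poll.
 */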
static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
				  struct mhi_result *mhi_result)
{
	struct mhi_netdev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct mhi_netbuf *netbuf = mhi_result->buf_addr;
	struct mhi_buf *mhi_buf = &netbuf->mhi_buf;
	struct sk_buff *skb;
	struct net_device *ndev = mhi_netdev->ndev;
	struct device *dev = mhi_dev->dev.parent->parent;
	struct mhi_net_chain *chain = mhi_netdev->chain;

	netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE);
	if (likely(netbuf->recycle))
		list_add_tail(&netbuf->node, mhi_netdev->recycle_pool);

	/* modem is down, drop the buffer */
	if (mhi_result->transaction_status == -ENOTCONN) {
		__free_pages(netbuf->page, mhi_netdev->order);
		return;
	}

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += mhi_result->bytes_xferd;

	if (unlikely(!chain)) {
		mhi_netdev_push_skb(mhi_netdev, mhi_buf, mhi_result);
		return;
	}

	/* we support chaining */
	skb = alloc_skb(0, GFP_ATOMIC);
	if (likely(skb)) {
		skb_add_rx_frag(skb, 0, netbuf->page, 0,
				mhi_result->bytes_xferd, mhi_netdev->mru);

		/* this is the first skb on the list */
		if (!chain->head) {
			skb->dev = ndev;
			skb->protocol =
				mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf);
			chain->head = skb;
		} else {
			skb_shinfo(chain->tail)->frag_list = skb;
		}

		chain->tail = skb;
	} else {
		__free_pages(netbuf->page, mhi_netdev->order);
	}
}

static void mhi_netdev_status_cb(struct mhi_device *mhi_dev,
				 enum mhi_callback mhi_cb)
{
	struct mhi_netdev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	if (mhi_cb != MHI_CB_PENDING_DATA)
		return;

	napi_schedule(mhi_netdev->napi);
	mhi_netdev->napi_scheduled = true;
}

static struct dentry *dentry;

#ifdef CONFIG_DEBUG_FS

static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
{
	struct mhi_netdev *mhi_netdev = m->private;

	seq_printf(m,
		   "mru:%u order:%u pool_size:%d, bg_pool_size:%d bg_pool_limit:%d abuf:%u kbuf:%u rbuf:%u\n",
		   mhi_netdev->mru, mhi_netdev->order, mhi_netdev->pool_size,
		   mhi_netdev->bg_pool_size, mhi_netdev->bg_pool_limit,
		   mhi_netdev->abuffers, mhi_netdev->kbuffers,
		   mhi_netdev->rbuffers);

	seq_printf(m, "chaining SKBs:%s\n", (mhi_netdev->chain) ?
		   "enabled" : "disabled");

	return 0;
}

static int mhi_netdev_debugfs_stats_open(struct inode *inode, struct file *fp)
{
	return single_open(fp, mhi_netdev_debugfs_stats_show, inode->i_private);
}

static const struct file_operations debugfs_stats = {
	.open = mhi_netdev_debugfs_stats_open,
	.release = single_release,
	.read = seq_read,
};

static int mhi_netdev_debugfs_chain(void *data, u64 val)
{
	struct mhi_netdev *mhi_netdev = data;
	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;

	mhi_netdev->chain = NULL;

	if (rsc_dev)
		rsc_dev->chain = NULL;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
			 mhi_netdev_debugfs_chain, "%llu\n");

static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
{
	char node_name[40];
	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;

	/* Both tx & rx client handles contain the same device info */
	snprintf(node_name, sizeof(node_name), "%s_%s", dev_name(&mhi_dev->dev),
		 mhi_netdev->interface_name);

	if (IS_ERR_OR_NULL(dentry))
		return;

	mhi_netdev->dentry = debugfs_create_dir(node_name, dentry);
	if (IS_ERR_OR_NULL(mhi_netdev->dentry))
		return;

	debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
				   mhi_netdev, &debugfs_stats);
	debugfs_create_file_unsafe("chain", 0444, mhi_netdev->dentry,
				   mhi_netdev, &debugfs_chain);
}

static void mhi_netdev_create_debugfs_dir(void)
{
	dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, NULL);
}

#else

static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
{
}

static void mhi_netdev_create_debugfs_dir(void)
{
}

#endif

static ssize_t log_level_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_netdev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	if (!mhi_netdev)
		return -EIO;

	return scnprintf(buf, PAGE_SIZE,
			 "MHI network device IPC log level begins from: %s\n",
			 MHI_NETDEV_LOG_LEVEL_STR(mhi_netdev->msg_lvl));
}

static ssize_t log_level_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_netdev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	enum MHI_DEBUG_LEVEL log_level;

	if (kstrtou32(buf, 0, &log_level) < 0)
		return -EINVAL;

	if (!mhi_netdev)
		return -EIO;

	mhi_netdev->msg_lvl = log_level;

	/* set the level for the parent if this is an RSC child netdev and
	 * vice versa
	 */
	if (mhi_netdev->is_rsc_dev)
		mhi_netdev->rsc_parent->msg_lvl = log_level;
	else if (mhi_netdev->rsc_dev)
		mhi_netdev->rsc_dev->msg_lvl = log_level;

	MSG_LOG("MHI Network device IPC log level changed to: %s\n",
		MHI_NETDEV_LOG_LEVEL_STR(log_level));

	return count;
}

static DEVICE_ATTR_RW(log_level);

static struct attribute *mhi_netdev_attrs[] = {
	&dev_attr_log_level.attr,
	NULL,
};

static const struct attribute_group mhi_netdev_group = {
	.attrs = mhi_netdev_attrs,
};

static void mhi_netdev_remove(struct mhi_device *mhi_dev)
{
	struct mhi_netdev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	MSG_LOG("Remove notification received\n");

	/* the rsc parent takes care of the cleanup except the buffer pool */
	if (mhi_netdev->is_rsc_dev) {
		mhi_netdev_free_pool(mhi_netdev);
		return;
	}

	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_netdev_group);
	kthread_stop(mhi_netdev->alloc_task);
	netif_stop_queue(mhi_netdev->ndev);
	napi_disable(mhi_netdev->napi);
	unregister_netdev(mhi_netdev->ndev);
	netif_napi_del(mhi_netdev->napi);
	free_netdev(mhi_netdev->ndev);
	mhi_netdev->ndev = NULL;

	if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
		debugfs_remove_recursive(mhi_netdev->dentry);

	if (!mhi_netdev->rsc_parent)
		mhi_netdev_free_pool(mhi_netdev);
}

static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev,
				 struct mhi_netdev *parent)
{
	mhi_netdev->ndev = parent->ndev;
	mhi_netdev->napi = parent->napi;
	mhi_netdev->ipc_log = parent->ipc_log;
	mhi_netdev->msg_lvl = parent->msg_lvl;
	mhi_netdev->is_rsc_dev = true;
	mhi_netdev->chain = parent->chain;
	mhi_netdev->rsc_parent = parent;
	mhi_netdev->recycle_pool = parent->recycle_pool;
	mhi_netdev->bg_pool = parent->bg_pool;
}

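/* Probe an MHI channel. A regular channel allocates the net_device, NAPI
 * context, buffer pools, and debug interfaces; an RSC channel instead clones
 * the already-probed parent so both channels share one network interface.
 */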
static int mhi_netdev_probe(struct mhi_device *mhi_dev,
			    const struct mhi_device_id *id)
{
	struct mhi_netdev *mhi_netdev;
	struct mhi_netdev_driver_data *data;
	char node_name[40];
	int nr_tre, ret;

	data = (struct mhi_netdev_driver_data *)id->driver_data;

	mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev),
				  GFP_KERNEL);
	if (!mhi_netdev)
		return -ENOMEM;

	/* move the mhi channels to the start state */
	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret) {
		MSG_ERR("Failed to start channels, ret: %d\n", ret);
		return ret;
	}

	mhi_netdev->mhi_dev = mhi_dev;
	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);

	mhi_netdev->mru = data->mru;
	mhi_netdev->rsc_parent = data->has_rsc_child ? mhi_netdev : NULL;
	mhi_netdev->rsc_dev = data->is_rsc_chan ? mhi_netdev : NULL;

	/* MRU must be a multiple of the page size */
	mhi_netdev->order = __ilog2_u32(mhi_netdev->mru / PAGE_SIZE);
	if ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru)
		return -EINVAL;

	if (data->is_rsc_chan) {
		if (!rsc_parent_netdev || !rsc_parent_netdev->ndev)
			return -ENODEV;

		/* this device is shared with the parent device, so we won't
		 * be creating a new network interface. Clone the parent
		 * information to the child node
		 */
		mhi_netdev_clone_dev(mhi_netdev, rsc_parent_netdev);
	} else {
		mhi_netdev->msg_lvl = MHI_NETDEV_LOG_LVL;

		ret = sysfs_create_group(&mhi_dev->dev.kobj, &mhi_netdev_group);
		if (ret)
			MSG_ERR("Failed to create MHI netdev sysfs group\n");

		if (data->chain_skb) {
			mhi_netdev->chain = devm_kzalloc(&mhi_dev->dev,
						sizeof(*mhi_netdev->chain),
						GFP_KERNEL);
			if (!mhi_netdev->chain)
				return -ENOMEM;
		}

		mhi_netdev->interface_name = data->interface_name;

		ret = mhi_netdev_enable_iface(mhi_netdev);
		if (ret)
			return ret;

		/* set up the pool size to roughly 2x the ring length */
		nr_tre = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
		mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
		if (nr_tre > mhi_netdev->pool_size)
			mhi_netdev->pool_size <<= 1;
		mhi_netdev->pool_size <<= 1;

		/* if we expect a child device to share it, double the pool */
		if (data->has_rsc_child)
			mhi_netdev->pool_size <<= 1;

		/* allocate the memory pool */
		ret = mhi_netdev_alloc_pool(mhi_netdev);
		if (ret)
			return -ENOMEM;

		/* create a background task to allocate memory */
		mhi_netdev->bg_pool = kmalloc(sizeof(*mhi_netdev->bg_pool),
					      GFP_KERNEL);
		if (!mhi_netdev->bg_pool)
			return -ENOMEM;

		init_waitqueue_head(&mhi_netdev->alloc_event);
		INIT_LIST_HEAD(mhi_netdev->bg_pool);
		spin_lock_init(&mhi_netdev->bg_lock);
		mhi_netdev->bg_pool_limit = mhi_netdev->pool_size / 4;
		mhi_netdev->alloc_task = kthread_run(mhi_netdev_alloc_thread,
						     mhi_netdev,
						     mhi_netdev->ndev->name);
		if (IS_ERR(mhi_netdev->alloc_task))
			return PTR_ERR(mhi_netdev->alloc_task);

		rsc_parent_netdev = mhi_netdev;

		/* create the ipc log buffer */
		snprintf(node_name, sizeof(node_name),
			 "%s_%s", dev_name(&mhi_dev->dev),
			 mhi_netdev->interface_name);
		mhi_netdev->ipc_log = ipc_log_context_create(IPC_LOG_PAGES,
							     node_name, 0);

		mhi_netdev_create_debugfs(mhi_netdev);
	}

	/* now that we have a pool of buffers allocated, queue them to the
	 * hardware by triggering a napi_poll
	 */
	napi_schedule(mhi_netdev->napi);
	mhi_netdev->napi_scheduled = true;

	return 0;
}

static const struct mhi_netdev_driver_data hw0_308_data = {
	.mru = 0x8000,
	.chain_skb = true,
	.is_rsc_chan = false,
	.has_rsc_child = false,
	.interface_name = "rmnet_mhi",
};

static const struct mhi_device_id mhi_netdev_match_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&hw0_308_data },
	{},
};

static struct mhi_driver mhi_netdev_driver = {
	.id_table = mhi_netdev_match_table,
	.probe = mhi_netdev_probe,
	.remove = mhi_netdev_remove,
	.ul_xfer_cb = mhi_netdev_xfer_ul_cb,
	.dl_xfer_cb = mhi_netdev_xfer_dl_cb,
	.status_cb = mhi_netdev_status_cb,
	.driver = {
		.name = "mhi_netdev",
		.owner = THIS_MODULE,
	}
};

static int __init mhi_netdev_init(void)
{
	BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE);
	mhi_netdev_create_debugfs_dir();

	return mhi_driver_register(&mhi_netdev_driver);
}
module_init(mhi_netdev_init);

static void __exit mhi_netdev_exit(void)
{
	debugfs_remove_recursive(dentry);
	mhi_driver_unregister(&mhi_netdev_driver);
}
module_exit(mhi_netdev_exit);

MODULE_DESCRIPTION("MHI NETDEV Network Interface");
MODULE_LICENSE("GPL");