qdf_nbuf.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017
  1. /*
  2. * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. /**
  27. * DOC: cdf_nbuf.c
  28. *
  29. * Connectivity driver framework(CDF) network buffer management APIs
  30. */
  31. #include <linux/kernel.h>
  32. #include <linux/version.h>
  33. #include <linux/skbuff.h>
  34. #include <linux/module.h>
  35. #include <cdf_types.h>
  36. #include <cdf_nbuf.h>
  37. #include <cdf_memory.h>
  38. #include <cdf_trace.h>
  39. #include <cdf_status.h>
  40. #include <cdf_lock.h>
  41. #if defined(FEATURE_TSO)
  42. #include <net/ipv6.h>
  43. #include <linux/ipv6.h>
  44. #include <linux/tcp.h>
  45. #include <linux/if_vlan.h>
  46. #include <linux/ip.h>
  47. #endif /* FEATURE_TSO */
  48. /* Packet Counter */
  49. static uint32_t nbuf_tx_mgmt[NBUF_TX_PKT_STATE_MAX];
  50. static uint32_t nbuf_tx_data[NBUF_TX_PKT_STATE_MAX];
/**
 * cdf_nbuf_tx_desc_count_display() - Displays the packet counter
 *
 * Prints, for both data and management packets, how many packets are
 * currently held at each layer of the TX path.  Each printed value is the
 * difference between the number of packets that entered a layer and the
 * number that left it, so a non-zero value means packets are in flight
 * (or stuck) at that layer.
 *
 * Return: none
 */
void cdf_nbuf_tx_desc_count_display(void)
{
	cdf_print("Current Snapshot of the Driver:\n");
	cdf_print("Data Packets:\n");
	/* HDD column excludes everything already handed to TXRX, whether
	 * passed through directly or still sitting in the TXRX queue
	 * (enqueue - dequeue). */
	cdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[NBUF_TX_PKT_TXRX] +
		   nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
		   nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[NBUF_TX_PKT_TXRX] - nbuf_tx_data[NBUF_TX_PKT_HTT],
		  nbuf_tx_data[NBUF_TX_PKT_HTT] - nbuf_tx_data[NBUF_TX_PKT_HTC]);
	cdf_print(" HTC %d HIF %d CE %d TX_COMP %d\n",
		  nbuf_tx_data[NBUF_TX_PKT_HTC] - nbuf_tx_data[NBUF_TX_PKT_HIF],
		  nbuf_tx_data[NBUF_TX_PKT_HIF] - nbuf_tx_data[NBUF_TX_PKT_CE],
		  nbuf_tx_data[NBUF_TX_PKT_CE] - nbuf_tx_data[NBUF_TX_PKT_FREE],
		  nbuf_tx_data[NBUF_TX_PKT_FREE]);
	cdf_print("Mgmt Packets:\n");
	cdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
		  nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[NBUF_TX_PKT_TXRX] - nbuf_tx_mgmt[NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[NBUF_TX_PKT_HTT] - nbuf_tx_mgmt[NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[NBUF_TX_PKT_HTC] - nbuf_tx_mgmt[NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[NBUF_TX_PKT_HIF] - nbuf_tx_mgmt[NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[NBUF_TX_PKT_CE] - nbuf_tx_mgmt[NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[NBUF_TX_PKT_FREE]);
}
  85. /**
  86. * cdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
  87. * @packet_type : packet type either mgmt/data
  88. * @current_state : layer at which the packet currently present
  89. *
  90. * Return: none
  91. */
  92. static inline void cdf_nbuf_tx_desc_count_update(uint8_t packet_type,
  93. uint8_t current_state)
  94. {
  95. switch (packet_type) {
  96. case NBUF_TX_PKT_MGMT_TRACK:
  97. nbuf_tx_mgmt[current_state]++;
  98. break;
  99. case NBUF_TX_PKT_DATA_TRACK:
  100. nbuf_tx_data[current_state]++;
  101. break;
  102. default:
  103. break;
  104. }
  105. }
  106. /**
  107. * cdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
  108. *
  109. * Return: none
  110. */
  111. void cdf_nbuf_tx_desc_count_clear(void)
  112. {
  113. memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
  114. memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
  115. }
  116. /**
  117. * cdf_nbuf_set_state() - Updates the packet state
  118. * @nbuf: network buffer
  119. * @current_state : layer at which the packet currently is
  120. *
  121. * This function updates the packet state to the layer at which the packet
  122. * currently is
  123. *
  124. * Return: none
  125. */
  126. void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state)
  127. {
  128. /*
  129. * Only Mgmt, Data Packets are tracked. WMI messages
  130. * such as scan commands are not tracked
  131. */
  132. uint8_t packet_type;
  133. packet_type = NBUF_GET_PACKET_TRACK(nbuf);
  134. if ((packet_type != NBUF_TX_PKT_DATA_TRACK) &&
  135. (packet_type != NBUF_TX_PKT_MGMT_TRACK)) {
  136. return;
  137. }
  138. NBUF_SET_PACKET_STATE(nbuf, current_state);
  139. cdf_nbuf_tx_desc_count_update(packet_type,
  140. current_state);
  141. }
  142. cdf_nbuf_trace_update_t trace_update_cb = NULL;
/**
 * __cdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom (bytes) to reserve at the front of the buffer
 * @align: required alignment for skb->data, 0 for none
 * @prio: Priority (currently unused by this implementation)
 *
 * This allocates an nbuf, aligns if needed and reserves some space in the
 * front, since the reserve is done after alignment the reserve value if
 * being unaligned will result in an unaligned address.
 *
 * Return: nbuf or %NULL if no memory
 */
struct sk_buff *__cdf_nbuf_alloc(cdf_device_t osdev, size_t size, int reserve,
				 int align, int prio)
{
	struct sk_buff *skb;
	unsigned long offset;

	/* over-allocate so data can later be pushed up by (align - 1)
	 * bytes without losing usable space */
	if (align)
		size += (align - 1);

	skb = dev_alloc_skb(size);

	if (!skb) {
		pr_err("ERROR:NBUF alloc failed\n");
		return NULL;
	}
	/* clear the control block so stale flags are never interpreted */
	memset(skb->cb, 0x0, sizeof(skb->cb));
	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 * Set the CVG_NBUF_MAX_EXTRA_FRAGS+1 wordstream_flags bits,
	 * to provide this default.
	 */
	NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) =
		(1 << (CVG_NBUF_MAX_EXTRA_FRAGS + 1)) - 1;
	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}
	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	return skb;
}
  194. /**
  195. * __cdf_nbuf_free() - free the nbuf its interrupt safe
  196. * @skb: Pointer to network buffer
  197. *
  198. * Return: none
  199. */
  200. void __cdf_nbuf_free(struct sk_buff *skb)
  201. {
  202. if ((NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID) && NBUF_CALLBACK_FN(skb))
  203. NBUF_CALLBACK_FN_EXEC(skb);
  204. else
  205. dev_kfree_skb_any(skb);
  206. }
  207. /**
  208. * __cdf_nbuf_map() - get the dma map of the nbuf
  209. * @osdev: OS device
  210. * @bmap: Bitmap
  211. * @skb: Pointer to network buffer
  212. * @dir: Direction
  213. *
  214. * Return: CDF_STATUS
  215. */
  216. CDF_STATUS
  217. __cdf_nbuf_map(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
  218. {
  219. #ifdef CDF_OS_DEBUG
  220. struct skb_shared_info *sh = skb_shinfo(skb);
  221. #endif
  222. cdf_assert((dir == CDF_DMA_TO_DEVICE)
  223. || (dir == CDF_DMA_FROM_DEVICE));
  224. /*
  225. * Assume there's only a single fragment.
  226. * To support multiple fragments, it would be necessary to change
  227. * cdf_nbuf_t to be a separate object that stores meta-info
  228. * (including the bus address for each fragment) and a pointer
  229. * to the underlying sk_buff.
  230. */
  231. cdf_assert(sh->nr_frags == 0);
  232. return __cdf_nbuf_map_single(osdev, skb, dir);
  233. return CDF_STATUS_SUCCESS;
  234. }
  235. /**
  236. * __cdf_nbuf_unmap() - to unmap a previously mapped buf
  237. * @osdev: OS device
  238. * @skb: Pointer to network buffer
  239. * @dir: Direction
  240. *
  241. * Return: none
  242. */
  243. void
  244. __cdf_nbuf_unmap(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
  245. {
  246. cdf_assert((dir == CDF_DMA_TO_DEVICE)
  247. || (dir == CDF_DMA_FROM_DEVICE));
  248. cdf_assert(((dir == CDF_DMA_TO_DEVICE)
  249. || (dir == CDF_DMA_FROM_DEVICE)));
  250. /*
  251. * Assume there's a single fragment.
  252. * If this is not true, the assertion in __cdf_nbuf_map will catch it.
  253. */
  254. __cdf_nbuf_unmap_single(osdev, skb, dir);
  255. }
/**
 * __cdf_nbuf_map_single() - dma map the nbuf's linear data buffer
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: DMA direction
 *
 * Maps the skb's linear data area (skb->data to the end of the buffer)
 * and records the resulting bus address in the nbuf control block.
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAILURE if the
 *         DMA mapping failed
 */
CDF_STATUS
__cdf_nbuf_map_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
{
	uint32_t paddr_lo;

/* tempory hack for simulation */
#ifdef A_SIMOS_DEVHOST
	/* no real DMA on the simulation host: reuse the virtual address */
	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo = (uint32_t) buf->data;
	return CDF_STATUS_SUCCESS;
#else
	/* assume that the OS only provides a single fragment */
	/* NOTE(review): only the low 32 bits of the DMA address are kept;
	 * this assumes a 32-bit dma_addr_t — verify on 64-bit platforms */
	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo =
		dma_map_single(osdev->dev, buf->data,
			       skb_end_pointer(buf) - buf->data, dir);
	return dma_mapping_error(osdev->dev, paddr_lo) ?
	       CDF_STATUS_E_FAILURE : CDF_STATUS_SUCCESS;
#endif /* #ifdef A_SIMOS_DEVHOST */
}
/**
 * __cdf_nbuf_unmap_single() - dma unmap nbuf
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: DMA direction (must match the direction used at map time)
 *
 * Releases the DMA mapping created by __cdf_nbuf_map_single() using the
 * bus address stored in the nbuf control block.  No-op on the simulation
 * host, where no real mapping was made.
 *
 * Return: none
 */
void
__cdf_nbuf_unmap_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
{
#if !defined(A_SIMOS_DEVHOST)
	dma_unmap_single(osdev->dev, NBUF_MAPPED_PADDR_LO(buf),
			 skb_end_pointer(buf) - buf->data, dir);
#endif /* #if !defined(A_SIMOS_DEVHOST) */
}
  297. /**
  298. * __cdf_nbuf_set_rx_cksum() - set rx checksum
  299. * @skb: Pointer to network buffer
  300. * @cksum: Pointer to checksum value
  301. *
  302. * Return: CDF_STATUS
  303. */
  304. CDF_STATUS
  305. __cdf_nbuf_set_rx_cksum(struct sk_buff *skb, cdf_nbuf_rx_cksum_t *cksum)
  306. {
  307. switch (cksum->l4_result) {
  308. case CDF_NBUF_RX_CKSUM_NONE:
  309. skb->ip_summed = CHECKSUM_NONE;
  310. break;
  311. case CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
  312. skb->ip_summed = CHECKSUM_UNNECESSARY;
  313. break;
  314. case CDF_NBUF_RX_CKSUM_TCP_UDP_HW:
  315. skb->ip_summed = CHECKSUM_PARTIAL;
  316. skb->csum = cksum->val;
  317. break;
  318. default:
  319. pr_err("ADF_NET:Unknown checksum type\n");
  320. cdf_assert(0);
  321. return CDF_STATUS_E_NOSUPPORT;
  322. }
  323. return CDF_STATUS_SUCCESS;
  324. }
  325. /**
  326. * __cdf_nbuf_get_tx_cksum() - get tx checksum
  327. * @skb: Pointer to network buffer
  328. *
  329. * Return: TX checksum value
  330. */
  331. cdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb)
  332. {
  333. switch (skb->ip_summed) {
  334. case CHECKSUM_NONE:
  335. return CDF_NBUF_TX_CKSUM_NONE;
  336. case CHECKSUM_PARTIAL:
  337. /* XXX ADF and Linux checksum don't map with 1-to-1. This is
  338. * not 100% correct */
  339. return CDF_NBUF_TX_CKSUM_TCP_UDP;
  340. case CHECKSUM_COMPLETE:
  341. return CDF_NBUF_TX_CKSUM_TCP_UDP_IP;
  342. default:
  343. return CDF_NBUF_TX_CKSUM_NONE;
  344. }
  345. }
  346. /**
  347. * __cdf_nbuf_get_tid() - get tid
  348. * @skb: Pointer to network buffer
  349. *
  350. * Return: tid
  351. */
  352. uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb)
  353. {
  354. return skb->priority;
  355. }
  356. /**
  357. * __cdf_nbuf_set_tid() - set tid
  358. * @skb: Pointer to network buffer
  359. *
  360. * Return: none
  361. */
  362. void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
  363. {
  364. skb->priority = tid;
  365. }
/**
 * __cdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * This implementation never grants an exemption.
 * (Header previously mis-named this function __cdf_nbuf_set_tid.)
 *
 * Return: CDF_NBUF_EXEMPT_NO_EXEMPTION
 */
uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
	return CDF_NBUF_EXEMPT_NO_EXEMPTION;
}
  376. /**
  377. * __cdf_nbuf_reg_trace_cb() - register trace callback
  378. * @cb_func_ptr: Pointer to trace callback function
  379. *
  380. * Return: none
  381. */
  382. void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
  383. {
  384. trace_update_cb = cb_func_ptr;
  385. return;
  386. }
  387. #ifdef QCA_PKT_PROTO_TRACE
/**
 * __cdf_nbuf_trace_update() - update trace event
 * @buf: Pointer to network buffer
 * @event_string: description string the protocol tag is appended to
 *
 * Builds "<event_string><proto-tag>" for EAPOL, DHCP or mgmt-action
 * frames and passes it to the registered trace callback.  Does nothing
 * if no callback is registered, the string is missing, or the buffer
 * carries no tracked protocol type.
 *
 * Return: none
 */
void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
{
	char string_buf[NBUF_PKT_TRAC_MAX_STRING];

	if ((!trace_update_cb) || (!event_string))
		return;

	if (!cdf_nbuf_trace_get_proto_type(buf))
		return;

	/* Buffer over flow */
	if (NBUF_PKT_TRAC_MAX_STRING <=
	    (cdf_str_len(event_string) + NBUF_PKT_TRAC_PROTO_STRING)) {
		return;
	}

	cdf_mem_zero(string_buf, NBUF_PKT_TRAC_MAX_STRING);
	cdf_mem_copy(string_buf, event_string, cdf_str_len(event_string));
	/* NOTE(review): "MACT" is one byte longer than the 3-char tags;
	 * assumes NBUF_PKT_TRAC_PROTO_STRING covers it — confirm */
	if (NBUF_PKT_TRAC_TYPE_EAPOL & cdf_nbuf_trace_get_proto_type(buf)) {
		cdf_mem_copy(string_buf + cdf_str_len(event_string),
			     "EPL", NBUF_PKT_TRAC_PROTO_STRING);
	} else if (NBUF_PKT_TRAC_TYPE_DHCP & cdf_nbuf_trace_get_proto_type(buf)) {
		cdf_mem_copy(string_buf + cdf_str_len(event_string),
			     "DHC", NBUF_PKT_TRAC_PROTO_STRING);
	} else if (NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
		   cdf_nbuf_trace_get_proto_type(buf)) {
		cdf_mem_copy(string_buf + cdf_str_len(event_string),
			     "MACT", NBUF_PKT_TRAC_PROTO_STRING);
	}

	trace_update_cb(string_buf);
	return;
}
  423. #endif /* QCA_PKT_PROTO_TRACE */
  424. #ifdef MEMORY_DEBUG
/* number of hash buckets for SKB tracking; must stay a power of two
 * because cdf_net_buf_debug_hash() masks with (size - 1) */
#define CDF_NET_BUF_TRACK_MAX_SIZE (1024)

/**
 * struct cdf_nbuf_track_t - Network buffer track structure
 *
 * @p_next: Pointer to the next node in the bucket's collision list
 * @net_buf: Pointer to the tracked network buffer
 * @file_name: name of the file the allocation was made from
 * @line_num: line number of the allocating call site
 * @size: allocation size in bytes
 */
struct cdf_nbuf_track_t {
	struct cdf_nbuf_track_t *p_next;
	cdf_nbuf_t net_buf;
	uint8_t *file_name;
	uint32_t line_num;
	size_t size;
};

/* protects gp_cdf_net_buf_track_tbl and every node linked from it */
spinlock_t g_cdf_net_buf_track_lock;
typedef struct cdf_nbuf_track_t CDF_NBUF_TRACK;

/* hash table of tracked SKBs, indexed by cdf_net_buf_debug_hash() */
CDF_NBUF_TRACK *gp_cdf_net_buf_track_tbl[CDF_NET_BUF_TRACK_MAX_SIZE];
  445. /**
  446. * cdf_net_buf_debug_init() - initialize network buffer debug functionality
  447. *
  448. * CDF network buffer debug feature tracks all SKBs allocated by WLAN driver
  449. * in a hash table and when driver is unloaded it reports about leaked SKBs.
  450. * WLAN driver module whose allocated SKB is freed by network stack are
  451. * suppose to call cdf_net_buf_debug_release_skb() such that the SKB is not
  452. * reported as memory leak.
  453. *
  454. * Return: none
  455. */
  456. void cdf_net_buf_debug_init(void)
  457. {
  458. uint32_t i;
  459. unsigned long irq_flag;
  460. spin_lock_init(&g_cdf_net_buf_track_lock);
  461. spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
  462. for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++)
  463. gp_cdf_net_buf_track_tbl[i] = NULL;
  464. spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
  465. return;
  466. }
  467. /**
  468. * cdf_net_buf_debug_init() - exit network buffer debug functionality
  469. *
  470. * Exit network buffer tracking debug functionality and log SKB memory leaks
  471. *
  472. * Return: none
  473. */
  474. void cdf_net_buf_debug_exit(void)
  475. {
  476. uint32_t i;
  477. unsigned long irq_flag;
  478. CDF_NBUF_TRACK *p_node;
  479. CDF_NBUF_TRACK *p_prev;
  480. spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
  481. for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
  482. p_node = gp_cdf_net_buf_track_tbl[i];
  483. while (p_node) {
  484. p_prev = p_node;
  485. p_node = p_node->p_next;
  486. CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
  487. "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
  488. p_prev->file_name, p_prev->line_num,
  489. p_prev->size);
  490. }
  491. }
  492. spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
  493. return;
  494. }
  495. /**
  496. * cdf_net_buf_debug_clean() - clean up network buffer debug functionality
  497. *
  498. * Return: none
  499. */
  500. void cdf_net_buf_debug_clean(void)
  501. {
  502. uint32_t i;
  503. unsigned long irq_flag;
  504. CDF_NBUF_TRACK *p_node;
  505. CDF_NBUF_TRACK *p_prev;
  506. spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
  507. for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
  508. p_node = gp_cdf_net_buf_track_tbl[i];
  509. while (p_node) {
  510. p_prev = p_node;
  511. p_node = p_node->p_next;
  512. cdf_mem_free(p_prev);
  513. }
  514. }
  515. spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
  516. return;
  517. }
  518. /**
  519. * cdf_net_buf_debug_hash() - hash network buffer pointer
  520. *
  521. * Return: hash value
  522. */
  523. uint32_t cdf_net_buf_debug_hash(cdf_nbuf_t net_buf)
  524. {
  525. uint32_t i;
  526. i = (uint32_t) ((uintptr_t) net_buf & (CDF_NET_BUF_TRACK_MAX_SIZE - 1));
  527. return i;
  528. }
  529. /**
  530. * cdf_net_buf_debug_look_up() - look up network buffer in debug hash table
  531. *
  532. * Return: If skb is found in hash table then return pointer to network buffer
  533. * else return %NULL
  534. */
  535. CDF_NBUF_TRACK *cdf_net_buf_debug_look_up(cdf_nbuf_t net_buf)
  536. {
  537. uint32_t i;
  538. CDF_NBUF_TRACK *p_node;
  539. i = cdf_net_buf_debug_hash(net_buf);
  540. p_node = gp_cdf_net_buf_track_tbl[i];
  541. while (p_node) {
  542. if (p_node->net_buf == net_buf)
  543. return p_node;
  544. p_node = p_node->p_next;
  545. }
  546. return NULL;
  547. }
/**
 * cdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer being tracked
 * @size: allocation size in bytes
 * @file_name: file of the allocating call site
 * @line_num: line of the allocating call site
 *
 * Inserts a tracking node at the head of the skb's hash bucket.  A
 * pre-existing node for the same skb indicates a double allocation and
 * triggers an assert.
 *
 * Return: none
 */
void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
				uint8_t *file_name, uint32_t line_num)
{
	uint32_t i;
	unsigned long irq_flag;
	CDF_NBUF_TRACK *p_node;

	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

	i = cdf_net_buf_debug_hash(net_buf);
	p_node = cdf_net_buf_debug_look_up(net_buf);

	if (p_node) {
		/* same skb tracked twice: report the first call site */
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "Double allocation of skb ! Already allocated from %s %d",
			  p_node->file_name, p_node->line_num);
		CDF_ASSERT(0);
		goto done;
	} else {
		p_node = (CDF_NBUF_TRACK *) cdf_mem_malloc(sizeof(*p_node));
		if (p_node) {
			/* link new node at the head of bucket i */
			p_node->net_buf = net_buf;
			p_node->file_name = file_name;
			p_node->line_num = line_num;
			p_node->size = size;
			p_node->p_next = gp_cdf_net_buf_track_tbl[i];
			gp_cdf_net_buf_track_tbl[i] = p_node;
		} else {
			/* tracking fails but the skb itself is still valid */
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
				  file_name, line_num, size);
			CDF_ASSERT(0);
		}
	}

done:
	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
	return;
}
/**
 * cdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer whose tracking node should be removed
 *
 * Unlinks and frees the tracking node for @net_buf.  If no node exists,
 * the buffer was never tracked (or already freed) and an assert fires.
 *
 * Return: none
 */
void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf)
{
	uint32_t i;
	bool found = false;
	CDF_NBUF_TRACK *p_head;
	CDF_NBUF_TRACK *p_node;
	unsigned long irq_flag;
	CDF_NBUF_TRACK *p_prev;

	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

	i = cdf_net_buf_debug_hash(net_buf);
	p_head = gp_cdf_net_buf_track_tbl[i];

	/* Unallocated SKB */
	if (!p_head)
		goto done;

	p_node = p_head;
	/* Found at head of the table */
	if (p_head->net_buf == net_buf) {
		gp_cdf_net_buf_track_tbl[i] = p_node->p_next;
		cdf_mem_free((void *)p_node);
		found = true;
		goto done;
	}

	/* Search in collision list */
	while (p_node) {
		p_prev = p_node;
		p_node = p_node->p_next;
		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
			/* unlink p_node and free it */
			p_prev->p_next = p_node->p_next;
			cdf_mem_free((void *)p_node);
			found = true;
			break;
		}
	}

done:
	if (!found) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "Unallocated buffer ! Double free of net_buf %p ?",
			  net_buf);
		CDF_ASSERT(0);
	}
	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
	return;
}
/**
 * cdf_net_buf_debug_release_skb() - release skb to avoid memory leak report
 * @net_buf: network buffer being handed to the network stack
 *
 * WLAN driver modules whose allocated SKBs are freed by the network stack
 * are supposed to call this API before returning the SKB to the stack so
 * that the SKB is not reported as a memory leak.
 *
 * Return: none
 */
void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
{
	cdf_net_buf_debug_delete_node(net_buf);
}
  649. #endif /*MEMORY_DEBUG */
  650. #if defined(FEATURE_TSO)
/**
 * struct cdf_tso_cmn_seg_info_t - TSO fields common to every segment
 * @ethproto: ethertype of the packet (network byte order compared)
 * @ip_tcp_hdr_len: combined IP + TCP header length
 * @l2_len: L2 (ethernet) header length
 * @eit_hdr: pointer to the ethernet + IP + TCP header block
 * @eit_hdr_len: length of that header block
 * @tcphdr: pointer to the packet's TCP header
 * @ipv4_csum_en: enable IPv4 header checksum offload
 * @tcp_ipv4_csum_en: enable TCP-over-IPv4 checksum offload
 * @tcp_ipv6_csum_en: enable TCP-over-IPv6 checksum offload
 * @ip_id: starting IP ID (IPv4 only), incremented per segment
 * @tcp_seq_num: starting TCP sequence number, advanced per segment
 */
struct cdf_tso_cmn_seg_info_t {
	uint16_t ethproto;
	uint16_t ip_tcp_hdr_len;
	uint16_t l2_len;
	unsigned char *eit_hdr;
	unsigned int eit_hdr_len;
	struct tcphdr *tcphdr;
	uint16_t ipv4_csum_en;
	uint16_t tcp_ipv4_csum_en;
	uint16_t tcp_ipv6_csum_en;
	uint16_t ip_id;
	uint32_t tcp_seq_num;
};
/**
 * __cdf_nbuf_get_tso_cmn_seg_info() - get TSO common segment information
 * @skb: jumbo packet to inspect
 * @tso_info: filled with the fields shared by all TCP segments
 *
 * Get the TSO information that is common across all the TCP
 * segments of the jumbo packet: ethertype, header pointers/lengths,
 * checksum-offload enables, initial IP ID and TCP sequence number.
 *
 * Return: 0 - success 1 - failure (non-TCP IPv4 or unsupported ethertype)
 */
uint8_t __cdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
	struct cdf_tso_cmn_seg_info_t *tso_info)
{
	/* Get ethernet type and ethernet header length */
	tso_info->ethproto = vlan_get_protocol(skb);

	/* Determine whether this is an IPv4 or IPv6 packet */
	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
		/* for IPv4, get the IP ID and enable TCP and IP csum */
		struct iphdr *ipv4_hdr = ip_hdr(skb);
		tso_info->ip_id = ntohs(ipv4_hdr->id);
		tso_info->ipv4_csum_en = 1;
		tso_info->tcp_ipv4_csum_en = 1;
		if (cdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
			/* TSO only applies to TCP */
			cdf_print("TSO IPV4 proto 0x%x not TCP\n",
				  ipv4_hdr->protocol);
			return 1;
		}
	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
		/* for IPv6, enable TCP csum. No IP ID or IP csum */
		tso_info->tcp_ipv6_csum_en = 1;
	} else {
		cdf_print("TSO: ethertype 0x%x is not supported!\n",
			  tso_info->ethproto);
		return 1;
	}

	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
	tso_info->tcphdr = tcp_hdr(skb);
	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
	/* get pointer to the ethernet + IP + TCP header and their length */
	tso_info->eit_hdr = skb->data;
	tso_info->eit_hdr_len = (skb_transport_header(skb)
				 - skb_mac_header(skb)) + tcp_hdrlen(skb);
	tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
	return 0;
}
/**
 * __cdf_nbuf_get_tso_info() - function to divide a TSO nbuf into segments
 * @osdev: OS device used for the DMA mappings
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 *      TSO segments will be populated within this.  On entry,
 *      tso_info->num_segs holds the segment budget and
 *      tso_info->tso_seg_list the pre-allocated segment chain.
 *
 * This function fragments a TCP jumbo packet into smaller
 * segments to be transmitted by the driver. It chains the TSO
 * segments created into a list.  Fragment 0 of every segment is the
 * shared ethernet+IP+TCP header; the payload fragments follow, drawn
 * first from the skb's linear area and then from its paged frags.
 *
 * Return: number of TSO segments
 */
uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
	struct cdf_tso_info_t *tso_info)
{
	/* common across all segments */
	struct cdf_tso_cmn_seg_info_t tso_cmn_info;

	/* segment specific */
	char *tso_frag_vaddr;
	uint32_t tso_frag_paddr_32 = 0;
	uint32_t num_seg = 0;
	struct cdf_tso_seg_elem_t *curr_seg;
	const struct skb_frag_struct *frag = NULL;
	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
	uint32_t foffset = 0; /* offset into the skb's fragment */
	uint32_t skb_proc = 0; /* bytes of the skb that have been processed*/
	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;

	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));

	if (cdf_unlikely(__cdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
		cdf_print("TSO: error getting common segment info\n");
		return 0;
	}
	curr_seg = tso_info->tso_seg_list;

	/* length of the first chunk of data in the skb */
	skb_proc = skb_frag_len = skb->len - skb->data_len;

	/* the 0th tso segment's 0th fragment always contains the EIT header */
	/* update the remaining skb fragment length and TSO segment length */
	skb_frag_len -= tso_cmn_info.eit_hdr_len;
	skb_proc -= tso_cmn_info.eit_hdr_len;

	/* get the address to the next tso fragment */
	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
	/* get the length of the next tso fragment */
	tso_frag_len = min(skb_frag_len, tso_seg_size);
	/* NOTE(review): these dma_map_single()/skb_frag_dma_map() results
	 * are never checked or unmapped here — presumably the TX completion
	 * path owns the unmap; confirm */
	tso_frag_paddr_32 = dma_map_single(osdev->dev,
		 tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);

	num_seg = tso_info->num_segs;
	tso_info->num_segs = 0;
	tso_info->is_tso = 1;

	while (num_seg && curr_seg) {
		int i = 1; /* tso fragment index */
		int j = 0; /* skb fragment index */
		uint8_t more_tso_frags = 1;
		uint8_t from_frag_table = 0;

		/* Initialize the flags to 0 */
		memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
		tso_info->num_segs++;

		/* The following fields remain the same across all segments of
		 a jumbo packet */
		curr_seg->seg.tso_flags.tso_enable = 1;
		curr_seg->seg.tso_flags.partial_checksum_en = 0;
		curr_seg->seg.tso_flags.ipv4_checksum_en =
			tso_cmn_info.ipv4_csum_en;
		curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
			tso_cmn_info.tcp_ipv6_csum_en;
		curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
			tso_cmn_info.tcp_ipv4_csum_en;
		curr_seg->seg.tso_flags.l2_len = 0;
		curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
		curr_seg->seg.num_frags = 0;

		/* The following fields change for the segments */
		curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
		tso_cmn_info.ip_id++;

		curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
		curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
		curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
		curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
		curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
		curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
		curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;

		curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;

		/* First fragment for each segment always contains the ethernet,
		IP and TCP header */
		curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
		curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
		tso_info->total_len = curr_seg->seg.tso_frags[0].length;
		curr_seg->seg.tso_frags[0].paddr_low_32 =
			 dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
				tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
		curr_seg->seg.num_frags++;

		while (more_tso_frags) {
			curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
			curr_seg->seg.tso_frags[i].length = tso_frag_len;
			tso_info->total_len +=
				 curr_seg->seg.tso_frags[i].length;
			curr_seg->seg.tso_flags.ip_len +=
				 curr_seg->seg.tso_frags[i].length;
			curr_seg->seg.num_frags++;
			skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;

			/* increment the TCP sequence number */
			tso_cmn_info.tcp_seq_num += tso_frag_len;
			curr_seg->seg.tso_frags[i].paddr_upper_16 = 0;
			curr_seg->seg.tso_frags[i].paddr_low_32 =
				 tso_frag_paddr_32;

			/* if there is no more data left in the skb */
			if (!skb_proc)
				return tso_info->num_segs;

			/* get the next payload fragment information */
			/* check if there are more fragments in this segment */
			if ((tso_seg_size - tso_frag_len)) {
				more_tso_frags = 1;
				i++;
			} else {
				more_tso_frags = 0;
				/* reset i and the tso payload size */
				i = 1;
				tso_seg_size = skb_shinfo(skb)->gso_size;
			}

			/* if the next fragment is contiguous */
			if (tso_frag_len < skb_frag_len) {
				skb_frag_len = skb_frag_len - tso_frag_len;
				tso_frag_len = min(skb_frag_len, tso_seg_size);
				/* NOTE(review): the pointer is advanced by the
				 * newly computed tso_frag_len rather than the
				 * number of bytes just consumed; the two only
				 * match when both equal tso_seg_size — confirm
				 * intended */
				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
				if (from_frag_table) {
					tso_frag_paddr_32 =
						 skb_frag_dma_map(osdev->dev,
							 frag, foffset,
							 tso_frag_len,
							 DMA_TO_DEVICE);
				} else {
					tso_frag_paddr_32 =
						 dma_map_single(osdev->dev,
							 tso_frag_vaddr,
							 tso_frag_len,
							 DMA_TO_DEVICE);
				}
			} else { /* the next fragment is not contiguous */
				tso_frag_len = min(skb_frag_len, tso_seg_size);
				frag = &skb_shinfo(skb)->frags[j];
				skb_frag_len = skb_frag_size(frag);
				tso_frag_vaddr = skb_frag_address(frag);
				tso_frag_paddr_32 = skb_frag_dma_map(osdev->dev,
					 frag, 0, tso_frag_len,
					 DMA_TO_DEVICE);
				foffset += tso_frag_len;
				from_frag_table = 1;
				j++;
			}
		}
		num_seg--;
		/* if TCP FIN flag was set, set it in the last segment */
		if (!num_seg)
			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
		curr_seg = curr_seg->next;
	}
	return tso_info->num_segs;
}
  867. /**
  868. * __cdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
  869. * into segments
  870. * @nbuf: network buffer to be segmented
  871. * @tso_info: This is the output. The information about the
  872. * TSO segments will be populated within this.
  873. *
  874. * This function fragments a TCP jumbo packet into smaller
  875. * segments to be transmitted by the driver. It chains the TSO
  876. * segments created into a list.
  877. *
  878. * Return: 0 - success, 1 - failure
  879. */
  880. uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  881. {
  882. uint32_t gso_size, tmp_len, num_segs = 0;
  883. gso_size = skb_shinfo(skb)->gso_size;
  884. tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
  885. + tcp_hdrlen(skb));
  886. while (tmp_len) {
  887. num_segs++;
  888. if (tmp_len > gso_size)
  889. tmp_len -= gso_size;
  890. else
  891. break;
  892. }
  893. return num_segs;
  894. }
/**
 * __cdf_nbuf_inc_users() - take an additional reference on the skb
 * @skb: network buffer whose user count is incremented
 *
 * NOTE(review): touches skb->users directly; in newer kernels this field
 * became a refcount_t — verify against the target kernel version.
 *
 * Return: the same skb, for call chaining
 */
struct sk_buff *__cdf_nbuf_inc_users(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
  900. #endif /* FEATURE_TSO */