/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
#include <qdf_nbuf_frag.h>
#include "qdf_time.h"
/*
 * Use the Linux socket buffer (sk_buff) as the underlying implementation.
 * Linux uses sk_buff to represent both the packet and its data, so the
 * qdf nbuf type maps directly onto struct sk_buff.
 */
typedef struct sk_buff *__qdf_nbuf_t;
/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

#define QDF_SHINFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2

#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6
#define QDF_NBUF_CB_PACKET_TYPE_DHCPV6 7

#define RADIOTAP_BASE_HEADER_LEN sizeof(struct ieee80211_radiotap_header)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
#define IEEE80211_RADIOTAP_HE 23
#define IEEE80211_RADIOTAP_HE_MU 24
#endif
#define IEEE80211_RADIOTAP_HE_MU_OTHER 25
#define IEEE80211_RADIOTAP_EXT1_USIG 1
#define IEEE80211_RADIOTAP_EXT1_EHT  2

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET 0x80000000

/* TCP Related MASK */
#define QDF_NBUF_PKT_TCPOP_FIN     0x01
#define QDF_NBUF_PKT_TCPOP_FIN_ACK 0x11
#define QDF_NBUF_PKT_TCPOP_RST     0x04

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
 */
typedef union {
    uint64_t u64;
    qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 * - data passed between layers of the driver.
 *
 * Notes:
 * 1. Hard limited to 48 bytes. Please count your bytes
 * 2. The size of this structure has to be easily calculable and
 *    consistently so: do not use any conditional compile flags
 * 3. Split into a common part followed by a tx/rx overlay
 * 4. There is only one extra frag, which represents the HTC/HTT header
 * 5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *    for the priv_cb_w since it must be at same offset for both
 *    TX and RX union
 * 6. "ipa.owned" bit must be first member in both TX and RX unions
 *    for the priv_cb_m since it must be at same offset for both
 *    TX and RX union.
 *
 * @paddr: physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.exc_frm: exception frame
 * @rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
 * @rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
 *                                             sw exception bit from ring desc
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPV6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *	+ (1) CE classification enablement bit
 *	+ (2) packet type (802.3 or Ethernet type II)
 *	+ (3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.is_critical: flag indicating a critical frame
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                          (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv:
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.to_fw: Flag to indicate send this packet to FW
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                       (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
    /* common */
    qdf_paddr_t paddr; /* of skb->data */
    /* valid only in one direction */
    union {
        /* Note: MAX: 40 bytes */
        struct {
            union {
                struct {
                    void *ext_cb_ptr;
                    void *fctx;
                    uint16_t msdu_len : 14,
                             flag_intra_bss : 1,
                             ipa_smmu_map : 1;
                    uint16_t peer_id;
                    uint16_t protocol_tag;
                    uint16_t flow_tag;
                } priv_cb_w;
                struct {
                    /* ipa_owned bit is common between rx
                     * control block and tx control block.
                     * Do not change location of this bit.
                     */
                    uint32_t ipa_owned:1,
                             peer_cached_buf_frm:1,
                             flush_ind:1,
                             packet_buf_pool:1,
                             l3_hdr_pad:3,
                             /* exception frame flag */
                             exc_frm:1,
                             ipa_smmu_map:1,
                             reo_dest_ind_or_sw_excpt:5,
                             reserved:2,
                             reserved1:16;
                    uint32_t tcp_seq_num;
                    uint32_t tcp_ack_num;
                    union {
                        struct {
                            uint16_t msdu_len;
                            uint16_t peer_id;
                        } wifi3;
                        struct {
                            uint32_t map_index;
                        } wifi2;
                    } dp;
                    unsigned char *lro_ctx;
                } priv_cb_m;
            } dev;
            uint32_t lro_eligible:1,
                     tcp_proto:1,
                     tcp_pure_ack:1,
                     ipv6_proto:1,
                     ip_offset:7,
                     tcp_offset:7,
                     rx_ctx_id:4,
                     fcs_err:1,
                     is_raw_frame:1,
                     num_elements_in_list:8;
            uint32_t tcp_udp_chksum:16,
                     tcp_win:16;
            uint32_t flow_id;
            uint8_t flag_chfrag_start:1,
                    flag_chfrag_cont:1,
                    flag_chfrag_end:1,
                    flag_retry:1,
                    flag_da_mcbc:1,
                    flag_da_valid:1,
                    flag_sa_valid:1,
                    flag_is_frag:1;
            union {
                uint8_t packet_state;
                uint8_t dp_trace:1,
                        packet_track:3,
                        rsrvd:4;
            } trace;
            uint16_t vdev_id:8,
                     tid_val:4,
                     ftype:4;
        } rx;

        /* Note: MAX: 40 bytes */
        struct {
            union {
                struct {
                    void *ext_cb_ptr;
                    void *fctx;
                } priv_cb_w;
                struct {
                    /* ipa_owned bit is common between rx
                     * control block and tx control block.
                     * Do not change location of this bit.
                     */
                    struct {
                        uint32_t owned:1,
                                 priv:31;
                    } ipa;
                    uint32_t data_attr;
                    uint16_t desc_id;
                    uint16_t mgmt_desc_id;
                    struct {
                        uint8_t bi_map:1,
                                reserved:7;
                    } dma_option;
                    uint8_t flag_notify_comp:1,
                            rsvd:7;
                    uint8_t reserved[2];
                } priv_cb_m;
            } dev;
            uint8_t ftype;
            uint8_t vdev_id;
            uint16_t len;
            union {
                struct {
                    uint8_t flag_efrag:1,
                            flag_nbuf:1,
                            num:1,
                            flag_chfrag_start:1,
                            flag_chfrag_cont:1,
                            flag_chfrag_end:1,
                            flag_ext_header:1,
                            is_critical:1;
                } bits;
                uint8_t u8;
            } flags;
            struct {
                uint8_t packet_state:7,
                        is_packet_priv:1;
                uint8_t packet_track:3,
                        to_fw:1,
                        proto_type:4;
                uint8_t dp_trace:1,
                        is_bcast:1,
                        is_mcast:1,
                        packet_type:3,
                        /* used only for hl */
                        htt2_frm:1,
                        print:1;
            } trace;
            unsigned char *vaddr;
            qdf_paddr_t paddr;
        } tx;
    } u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
                        (sizeof(struct qdf_nbuf_cb)) <=
                        sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
                        (sizeof(struct qdf_nbuf_cb)) <=
                        FIELD_SIZEOF(struct sk_buff, cb));
#endif
/*
 * Access macros for qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 * When used as R-values, they effectively function as "get" macros.
 * When used as L-values, they effectively function as "set" macros.
 */
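/*
 * Illustrative sketch (not part of the API surface): because each accessor
 * expands to a plain struct member reference inside skb->cb, the same macro
 * works on either side of an assignment:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;			 <-- "set"
 *	uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(skb);	 <-- "get"
 */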
#define QDF_NBUF_CB_PADDR(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)

#define QDF_NBUF_CB_RX_PACKET_STATE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)

#define QDF_NBUF_CB_RX_FTYPE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)

#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_chfrag_end)

#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_da_mcbc)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.is_raw_frame)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.tid_val)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
    qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)

#define QDF_NBUF_CB_TX_FTYPE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)

#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
    (((struct qdf_nbuf_cb *) \
    ((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
    (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
    (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
    (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)

/* end of nbuf->cb access macros */
typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr) \
    (QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head( \
    skb, frag_len, frag_vaddr, frag_paddr) \
    do { \
        QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \
        QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \
        QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \
        QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \
    } while (0)
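/*
 * Illustrative sketch (hypothetical htt_desc_* names): a tx path that
 * prepends its HTC/HTT descriptor as the single driver-added fragment,
 * as described for QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS above, might do:
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_desc_len,
 *				  (unsigned char *)htt_desc_vaddr,
 *				  htt_desc_paddr);
 *	After this, __qdf_nbuf_get_num_frags(skb) reports 2: the descriptor
 *	fragment plus the single OS-provided fragment.
 */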
#define __qdf_nbuf_get_frag_vaddr(skb, frag_num) \
    ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
     QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
#define __qdf_nbuf_get_frag_vaddr_always(skb) \
    QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
#define __qdf_nbuf_get_frag_paddr(skb, frag_num) \
    ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
     QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \
     /* assume that the OS only provides a single fragment */ \
     QDF_NBUF_CB_PADDR(skb))
#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)
#define __qdf_nbuf_get_frag_len(skb, frag_num) \
    ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
     QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
    ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
     ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \
     : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \
    do { \
        if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
            frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS; \
        if (frag_num) \
            QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = \
                is_wstrm; \
        else \
            QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = \
                is_wstrm; \
    } while (0)
#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
    do { \
        QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
    } while (0)
#define __qdf_nbuf_get_vdev_ctx(skb) \
    QDF_NBUF_CB_TX_VDEV_CTX((skb))
#define __qdf_nbuf_set_tx_ftype(skb, type) \
    do { \
        QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
    } while (0)
#define __qdf_nbuf_get_tx_ftype(skb) \
    QDF_NBUF_CB_TX_FTYPE((skb))
#define __qdf_nbuf_set_rx_ftype(skb, type) \
    do { \
        QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
    } while (0)
#define __qdf_nbuf_get_rx_ftype(skb) \
    QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
    ((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)
#define __qdf_nbuf_is_rx_chfrag_start(skb) \
    (QDF_NBUF_CB_RX_CHFRAG_START((skb)))
#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
    do { \
        (QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
    } while (0)
#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
    (QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))
#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
    ((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)
#define __qdf_nbuf_is_rx_chfrag_end(skb) \
    (QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
    ((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)
#define __qdf_nbuf_is_da_mcbc(skb) \
    (QDF_NBUF_CB_RX_DA_MCBC((skb)))
#define __qdf_nbuf_set_da_valid(skb, val) \
    ((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)
#define __qdf_nbuf_is_da_valid(skb) \
    (QDF_NBUF_CB_RX_DA_VALID((skb)))
#define __qdf_nbuf_set_sa_valid(skb, val) \
    ((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)
#define __qdf_nbuf_is_sa_valid(skb) \
    (QDF_NBUF_CB_RX_SA_VALID((skb)))
#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
    ((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)
#define __qdf_nbuf_is_rx_retry_flag(skb) \
    (QDF_NBUF_CB_RX_RETRY_FLAG((skb)))
#define __qdf_nbuf_set_raw_frame(skb, val) \
    ((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)
#define __qdf_nbuf_is_raw_frame(skb) \
    (QDF_NBUF_CB_RX_RAW_FRAME((skb)))
#define __qdf_nbuf_get_tid_val(skb) \
    (QDF_NBUF_CB_RX_TID_VAL((skb)))
#define __qdf_nbuf_set_tid_val(skb, val) \
    ((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)
#define __qdf_nbuf_set_is_frag(skb, val) \
    ((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)
#define __qdf_nbuf_is_frag(skb) \
    (QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
    ((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)
#define __qdf_nbuf_is_tx_chfrag_start(skb) \
    (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
    do { \
        (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
    } while (0)
#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
    (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))
#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
    ((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)
#define __qdf_nbuf_is_tx_chfrag_end(skb) \
    (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type) \
    (QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
#define __qdf_nbuf_trace_get_proto_type(skb) \
    QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb) \
    QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
    (QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar) \
    skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
    QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/*
 * prototypes. Implemented in qdf_nbuf.c
 */

/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in the
 * front. Since the reserve is done after alignment, an unaligned reserve value
 * will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
                 int prio, const char *func, uint32_t line);

__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size);
/**
 * __qdf_nbuf_alloc_no_recycler() - Allocates skb
 * @size: Size to be allocated for skb
 * @reserve: Reserve headroom size
 * @align: Align data
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This API allocates an nbuf and aligns it if needed, then reserves some
 * headroom after the alignment. The nbuf is not allocated from the skb
 * recycler pool.
 *
 * Return: Allocated nbuf pointer
 */
__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
                                          const char *func, uint32_t line);

/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @nbuf: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, the implementation could instead check whether
 * it is called from interrupt context (for example via
 * "in_irq() || irqs_disabled()") and otherwise use GFP_KERNEL in the normal
 * case.
 *
 * Return: cloned skb
 */
__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t nbuf);
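/*
 * Illustrative sketch of the allocation-flag choice suggested above (an
 * assumption about one possible implementation, not the shipped code):
 *
 *	gfp_t flags = (in_irq() || irqs_disabled()) ?
 *			GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *cloned = skb_clone(skb, flags);
 */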
void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
                          struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
                      struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
                                 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
                             struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
                                 qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
                             qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
                             qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg);
QDF_STATUS __qdf_nbuf_frag_map(
    qdf_device_t osdev, __qdf_nbuf_t nbuf,
    int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);
#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);
#else
static inline int __qdf_nbuf_count_get(void)
{
    return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif
/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
    switch (error) {
    case 0:
        return QDF_STATUS_SUCCESS;
    case ENOMEM:
    case -ENOMEM:
        return QDF_STATUS_E_NOMEM;
    default:
        return QDF_STATUS_E_NOSUPPORT;
    }
}
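/*
 * Illustrative sketch: mapping a kernel-style error return through
 * __qdf_to_status(). Both signs of ENOMEM are handled; anything else
 * collapses to QDF_STATUS_E_NOSUPPORT:
 *
 *	int rc = pskb_expand_head(skb, 0, 128, GFP_ATOMIC);
 *	QDF_STATUS status = __qdf_to_status(rc);
 *	rc == 0       -> QDF_STATUS_SUCCESS
 *	rc == -ENOMEM -> QDF_STATUS_E_NOMEM
 */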
/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If there are frags
 * then it returns the total length.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
    int i, extra_frag_len = 0;

    i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
    if (i > 0)
        extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

    return extra_frag_len + skb->len;
}
/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concat two nbufs: the new buf (src) is piggybacked into the older one.
 * It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS (status of the call); if it failed, the src skb
 * is released
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
    QDF_STATUS error = 0;

    qdf_assert(dst && src);

    /*
     * Since pskb_expand_head unconditionally reallocates the skb->head
     * buffer, first check whether the current buffer is already large
     * enough.
     */
    if (skb_tailroom(dst) < src->len) {
        error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
        if (error)
            return __qdf_to_status(error);
    }

    memcpy(skb_tail_pointer(dst), src->data, src->len);
    skb_put(dst, src->len);
    return __qdf_to_status(error);
}
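/*
 * Illustrative usage sketch (hypothetical dst/src buffers): append src's
 * data to dst, then free src on success as the comment above requires:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */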
/*
 * nbuf manipulation routines
 */

/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
    return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
    return skb_tailroom(skb);
}
/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 * put, or NULL if there is not enough room in this buf.
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
    if (skb_tailroom(skb) < size) {
        if (unlikely(pskb_expand_head(skb, 0,
                                      size - skb_tailroom(skb),
                                      GFP_ATOMIC))) {
            dev_kfree_skb_any(skb);
            return NULL;
        }
    }
    return skb_put(skb, size);
}

/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
    skb_trim(skb, skb->len - size);
}
/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
                                   qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
    return skb_shinfo(skb)->nr_frags;
}
/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is completely
 * modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
    struct sk_buff *skb_new = NULL;

    skb_new = skb_copy(skb, GFP_ATOMIC);
    if (skb_new)
        __qdf_nbuf_count_inc(skb_new);

    return skb_new;
}
#define __qdf_nbuf_reserve skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
    skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
    skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
    skb_set_tail_pointer(skb, len);
}
/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version; the driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
    __skb_unlink(skb, list);
}
/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
    int offset;

    skb_push(skb, skb_headroom(skb));
    skb_put(skb, skb_tailroom(skb));
    memset(skb->data, 0x0, skb->len);
    skb_trim(skb, 0);
    skb_reserve(skb, NET_SKB_PAD);
    memset(skb->cb, 0x0, sizeof(skb->cb));

    /*
     * The default is for netbuf fragments to be interpreted
     * as wordstreams rather than bytestreams.
     */
    QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
    QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

    /*
     * Align & make sure that the tail & data are adjusted properly
     */
    if (align) {
        offset = ((unsigned long)skb->data) % align;
        if (offset)
            skb_reserve(skb, align - offset);
    }

    skb_reserve(skb, reserve);
}
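/*
 * Worked example for the alignment step above: with align == 4 and
 * skb->data ending in ...0x2, offset == 2, so skb_reserve(skb, 2)
 * advances data to the next 4-byte boundary before the caller's
 * reserve is applied.
 */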
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 * buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *	   false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
    return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *	   0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
    return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
    skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
    return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
    return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
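/*
 * Illustrative usage sketch: callers should gate on the support check,
 * since pre-4.14 kernels compile the accessors as no-ops:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 *	On older kernels __qdf_nbuf_get_dev_scratch(skb) always returns 0.
 */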
/**
 * __qdf_nbuf_head() - return the skb's head pointer
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
    return skb->head;
}

/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
    return skb->data;
}

/**
 * __qdf_nbuf_data_addr() - return the address of the skb's data pointer
 * @skb: Pointer to network buffer
 *
 * Return: address of the skb->data field itself
 */
static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
    return (uint8_t *)&skb->data;
}
/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
    return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the checksum status (ip_summed) of
 * the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
    return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum status
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
                                            uint8_t ip_summed)
{
    skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
    return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
    skb->priority = p;
}
/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
    skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
    return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
    skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
    return skb->next;
}
/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions (seg1, seg2,
 * ...) to the nbuf holding the head segment (seg0).
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			   struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len += ext_len;
	skb_head->len += ext_len;
}

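/*
 * Illustrative sketch (hypothetical seg0/seg1/seg2 buffers, not part of the
 * API): chaining two extension segments onto a head segment. Note that
 * @ext_len must equal the sum of the extension buffers' lengths so that
 * skb_head->len stays consistent.
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_set_next_ext(seg2, NULL);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 */
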
/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in the
 * case of monitor mode AMSDU packet reception, where the segments are
 * stitched together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}

/**
 * __qdf_nbuf_get_age() - return the age value of the skb (stored in the
 * checksum field)
 * @skb: Pointer to network buffer
 *
 * Return: age value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the age value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the age value of the skb downward
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}

/**
 * __qdf_nbuf_copy_bits() - copy @len bytes from @offset in the skb to @to
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, negative error code on failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}

/**
 * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
 * @skb: Pointer to network buffer
 * @len: Packet length
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
{
	if (skb->len > len) {
		skb_trim(skb, len);
	} else {
		if (skb_tailroom(skb) < len - skb->len) {
			if (unlikely(pskb_expand_head(skb, 0,
				len - skb->len - skb_tailroom(skb),
				GFP_ATOMIC))) {
				QDF_DEBUG_PANIC(
				"SKB tailroom is less than requested length."
				" tail-room: %u, len: %u, skb->len: %u",
				skb_tailroom(skb), len, skb->len);
				dev_kfree_skb_any(skb);
			}
		}
		skb_put(skb, (len - skb->len));
	}
}

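/*
 * Illustrative sketch (hypothetical buffer, not part of the API): growing
 * and shrinking an skb with __qdf_nbuf_set_pktlen(). Growing beyond the
 * available tailroom triggers a GFP_ATOMIC reallocation of the head.
 *
 *	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
 *
 *	if (skb) {
 *		__qdf_nbuf_set_pktlen(skb, 64);   // len: 0 -> 64 (skb_put)
 *		__qdf_nbuf_set_pktlen(skb, 16);   // len: 64 -> 16 (skb_trim)
 *		__qdf_nbuf_set_pktlen(skb, 256);  // expands head, then puts
 *	}
 */
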
/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
#define __qdf_nbuf_get_tx_htt2_frm(skb) \
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
			  uint32_t *lo, uint32_t *hi);
uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
				 struct qdf_tso_info_t *tso_info);
void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
				  struct qdf_tso_seg_elem_t *tso_seg,
				  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - function to return the tcp
 * payload len
 * @skb: buffer
 *
 * Return: size
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);
uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}
#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (skb_is_gso_v6(skb) ||
		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4));
}

struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 * and get hw_classify by peeking into the packet
 * @skb: Network buffer (skb on Linux)
 * @pkt_type: Pkt type (from enum htt_pkt_type)
 * @pkt_subtype: Bit 4 of this field in the HTT descriptor needs to be set
 * when CE classification is supported; it is set by this macro.
 * @hw_classify: Flag which is set to indicate that CE classification is
 * enabled. It must not be set for VLAN packets or for mcast/bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in the CE.
 *
 * It overrides pkt_type only if required for 802.3 frames (original
 * ethernet): if the protocol field is less than ETH_P_802_3_MIN (0x600),
 * it is a length and the frame is 802.3; otherwise it is Ethernet Type II
 * (RFC 894). Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return: void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type, \
			       pkt_subtype, hw_classify) \
do { \
	struct ethhdr *eh = (struct ethhdr *)skb->data; \
	uint16_t ether_type = ntohs(eh->h_proto); \
	bool is_mc_bc; \
\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) || \
		   is_multicast_ether_addr((uint8_t *)eh); \
\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) { \
		hw_classify = 1; \
		pkt_subtype = 0x01 << HTT_TX_CLASSIFY_BIT_S; \
	} \
\
	if (unlikely(ether_type < ETH_P_802_3_MIN)) \
		pkt_type = htt_pkt_type_ethernet; \
\
} while (0)

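/*
 * Illustrative sketch (hypothetical locals, not part of the API): a typical
 * invocation on the Tx path. The macro writes its results back into the
 * pkt_type, pkt_subtype and hw_classify lvalues passed to it.
 *
 *	uint8_t pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);
 *	// hw_classify == 1 here means the CE tx_classify path may be used
 */
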
/**
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & len
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions *************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL; /* Nullify the next ptr */
	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;
	qhead->tail = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src: source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version, driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: the tail pointer must be updated too */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. Driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

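/*
 * Illustrative sketch (hypothetical skb0/skb1/skb2 and consume(), not part
 * of the API): the lockless queue used as a simple FIFO. Callers are
 * responsible for any locking, as noted above.
 *
 *	__qdf_nbuf_queue_t q;
 *	struct sk_buff *skb;
 *
 *	__qdf_nbuf_queue_init(&q);
 *	__qdf_nbuf_queue_add(&q, skb1);		// q: skb1
 *	__qdf_nbuf_queue_add(&q, skb2);		// q: skb1 -> skb2
 *	__qdf_nbuf_queue_insert_head(&q, skb0);	// q: skb0 -> skb1 -> skb2
 *
 *	while ((skb = __qdf_nbuf_queue_remove(&q)))
 *		consume(skb);			// dequeues in FIFO order
 */
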
/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_last() - returns the last skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
{
	return qhead->tail;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from the packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from the packet chain; note that the skb
 * remains in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be embedded in some structure,
 * we don't use a pointer type as the definition.
 */

/**
 * __qdf_nbuf_set_send_complete_flag() - set send-complete flag
 * (no-op on Linux)
 * @skb: Pointer to network buffer
 * @flag: Flag value
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom in the data region
 * while keeping the skb shell intact
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * In case of failure the skb is released.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom in the data region
 * while keeping the skb shell intact
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * In case of failure the skb is released.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it so that its
 * contents can be safely accessed as a single contiguous buffer.
 *
 * Return: 0 on success, -ENOMEM if the skb could not be linearized
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Creates a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data. If the buffer is not a clone,
 * the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	struct sk_buff *skb_new;

	__qdf_frag_count_dec(__qdf_nbuf_get_nr_frags(skb));
	skb_new = skb_unshare(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_frag_count_inc(__qdf_nbuf_get_nr_frags(skb_new));
	return skb_new;
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/*
 * __qdf_nbuf_pool_delete() implementation - do nothing on Linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 * release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_copy_expand() - copy and expand nbuf
 * @buf: Network buf instance
 * @headroom: Additional headroom to be added
 * @tailroom: Additional tailroom to be added
 *
 * Return: New nbuf that is a copy of buf, with additional head and tailroom
 * or NULL if there is no memory
 */
static inline struct sk_buff *
__qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
{
	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_has_fraglist() - check buf has fraglist
 * @buf: Network buf instance
 *
 * Return: True, if buf has frag_list else return False
 */
static inline bool
__qdf_nbuf_has_fraglist(struct sk_buff *buf)
{
	return skb_has_frag_list(buf);
}

/**
 * __qdf_nbuf_get_last_frag_list_nbuf() - Get last frag_list nbuf
 * @buf: Network buf instance
 *
 * Return: Network buf instance
 */
static inline struct sk_buff *
__qdf_nbuf_get_last_frag_list_nbuf(struct sk_buff *buf)
{
	struct sk_buff *list;

	if (!__qdf_nbuf_has_fraglist(buf))
		return NULL;

	for (list = skb_shinfo(buf)->frag_list; list->next; list = list->next)
		;

	return list;
}

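/*
 * Illustrative sketch (hypothetical head/new_seg buffers, not part of the
 * API): appending one more segment to a head buffer that may or may not
 * already carry a frag_list. The length bookkeeping mirrors what
 * __qdf_nbuf_append_ext_list() does above.
 *
 *	struct sk_buff *last = __qdf_nbuf_get_last_frag_list_nbuf(head);
 *
 *	if (last) {
 *		__qdf_nbuf_set_next_ext(last, new_seg);
 *		head->data_len += new_seg->len;
 *		head->len += new_seg->len;
 *	} else {
 *		__qdf_nbuf_append_ext_list(head, new_seg, new_seg->len);
 *	}
 */
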
/**
 * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
 * @buf: Network buf instance
 *
 * Return: void
 */
static inline void
__qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
{
	struct sk_buff *list;

	skb_walk_frags(buf, list)
		skb_get(list);
}

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: sk buff
 * @hdr_off: Pointer to store the header offset
 * @where: Pointer to store the checksum location
 *
 * Return: true/false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP maximum segment size (MSS)
 * passed as part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_tso_tcp_v4() - to check if the TSO TCP pkt is IPv4 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - to check if the TSO TCP pkt is IPv6 or not.
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb);
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
 *
 * Return: timestamp stored in skb in ms
 */
static inline uint64_t
__qdf_nbuf_get_timestamp(struct sk_buff *skb)
{
	return ktime_to_ms(skb_get_ktime(skb));
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in micro seconds
 * @skb: sk buff
 *
 * Return: time difference in micro seconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_orphan() - orphan an nbuf
 * @skb: sk buff
 *
 * If a buffer currently has an owner then we call the
 * owner's destructor function
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	skb_orphan(skb);
}

/**
 * __qdf_nbuf_get_end_offset() - Return the size of the nbuf from
 * head pointer to end pointer
 * @nbuf: qdf_nbuf_t
 *
 * Return: size of network buffer from head pointer to end
 * pointer
 */
static inline unsigned int __qdf_nbuf_get_end_offset(__qdf_nbuf_t nbuf)
{
	return skb_end_offset(nbuf);
}

/**
 * __qdf_nbuf_get_truesize() - Return the true size of the nbuf
 * including the header and variable data area
 * @skb: sk buff
 *
 * Return: size of network buffer
 */
static inline unsigned int __qdf_nbuf_get_truesize(struct sk_buff *skb)
{
	return skb->truesize;
}

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * __qdf_record_nbuf_nbytes() - add or subtract the size of the nbuf
 * from the total skb mem and DP tx/rx skb mem
 * @nbytes: number of bytes
 * @dir: direction
 * @is_mapped: is mapped or unmapped memory
 *
 * Return: none
 */
static inline void __qdf_record_nbuf_nbytes(
	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
	if (is_mapped) {
		if (dir == QDF_DMA_TO_DEVICE) {
			qdf_mem_dp_tx_skb_cnt_inc();
			qdf_mem_dp_tx_skb_inc(nbytes);
		} else if (dir == QDF_DMA_FROM_DEVICE) {
			qdf_mem_dp_rx_skb_cnt_inc();
			qdf_mem_dp_rx_skb_inc(nbytes);
		}
		qdf_mem_skb_total_inc(nbytes);
	} else {
		if (dir == QDF_DMA_TO_DEVICE) {
			qdf_mem_dp_tx_skb_cnt_dec();
			qdf_mem_dp_tx_skb_dec(nbytes);
		} else if (dir == QDF_DMA_FROM_DEVICE) {
			qdf_mem_dp_rx_skb_cnt_dec();
			qdf_mem_dp_rx_skb_dec(nbytes);
		}
		qdf_mem_skb_total_dec(nbytes);
	}
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void __qdf_record_nbuf_nbytes(
	int nbytes, qdf_dma_dir_t dir, bool is_mapped)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

/**
 * __qdf_nbuf_map_nbytes_single() - map nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: QDF_STATUS
 */
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf,
	qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)(uintptr_t)buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf,
	qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;
	QDF_STATUS ret;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	if (QDF_IS_STATUS_SUCCESS(ret))
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, true);
	return ret;
}
#endif

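/*
 * Illustrative sketch (hypothetical locals, not part of the API): typical
 * map/unmap pairing around device DMA. The mapped address is cached in the
 * nbuf control block (QDF_NBUF_CB_PADDR), which the unmap path reads back.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *					 skb->len) != QDF_STATUS_SUCCESS)
 *		return;		// mapping failed, nothing to unmap
 *
 *	// ... hand QDF_NBUF_CB_PADDR(skb) to the hardware ...
 *
 *	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
 *				       skb->len);
 */
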
/**
 * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);

	if (qdf_likely(paddr)) {
		__qdf_record_nbuf_nbytes(
			__qdf_nbuf_get_end_offset(buf), dir, false);
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
	}
}
#endif

static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
{
	spin_lock_bh(&skb_queue_head->lock);
}

/**
 * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
{
	spin_unlock_bh(&skb_queue_head->lock);
}

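/*
 * Illustrative sketch (hypothetical txq/inspect(), not part of the API):
 * unlike the lockless __qdf_nbuf_queue_t above, the sk_buff_head variant
 * can be shared across contexts. The enqueue/dequeue wrappers lock
 * internally; the explicit lock helpers are for compound operations built
 * from unlocked list primitives, e.g. a locked walk:
 *
 *	struct sk_buff_head txq;
 *	struct sk_buff *cur;
 *
 *	__qdf_nbuf_queue_head_init(&txq);
 *	__qdf_nbuf_queue_head_enqueue_tail(&txq, skb);	// locks internally
 *
 *	__qdf_nbuf_queue_head_lock(&txq);
 *	skb_queue_walk(&txq, cur)			// needs external lock
 *		inspect(cur);
 *	__qdf_nbuf_queue_head_unlock(&txq);
 */
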
/**
 * __qdf_nbuf_get_frag_size_by_idx() - Get nbuf frag size at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag size is requested
 *
 * Return: Frag size
 */
static inline unsigned int __qdf_nbuf_get_frag_size_by_idx(__qdf_nbuf_t nbuf,
							   uint8_t idx)
{
	unsigned int size = 0;

	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
		size = skb_frag_size(&skb_shinfo(nbuf)->frags[idx]);
	return size;
}

/**
 * __qdf_nbuf_get_frag_addr() - Get nbuf frag address at index idx
 * @nbuf: qdf_nbuf_t
 * @idx: Index for which frag address is requested
 *
 * Return: Frag address on success, else NULL
 */
static inline __qdf_frag_t __qdf_nbuf_get_frag_addr(__qdf_nbuf_t nbuf,
						    uint8_t idx)
{
	__qdf_frag_t frag_addr = NULL;

	if (likely(idx < __QDF_NBUF_MAX_FRAGS))
		frag_addr = skb_frag_address(&skb_shinfo(nbuf)->frags[idx]);
	return frag_addr;
}

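/*
 * Illustrative sketch (hypothetical process_frag() consumer, not part of
 * the API): walking every populated frag of an nbuf. nr_frags is read from
 * the shared info, and both accessors bounds-check against
 * __QDF_NBUF_MAX_FRAGS internally.
 *
 *	uint8_t i;
 *
 *	for (i = 0; i < skb_shinfo(nbuf)->nr_frags; i++) {
 *		__qdf_frag_t vaddr = __qdf_nbuf_get_frag_addr(nbuf, i);
 *		unsigned int len = __qdf_nbuf_get_frag_size_by_idx(nbuf, i);
 *
 *		if (vaddr)
 *			process_frag(vaddr, len);
 *	}
 */
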
/**
 * __qdf_nbuf_trim_add_frag_size() - Increase/Decrease frag_size by size
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @size: Size by which frag_size needs to be increased/decreased
 * (+ve means increase, -ve means decrease)
 * @truesize: truesize
 */
static inline void __qdf_nbuf_trim_add_frag_size(__qdf_nbuf_t nbuf, uint8_t idx,
						 int size,
						 unsigned int truesize)
{
	skb_coalesce_rx_frag(nbuf, idx, size, truesize);
}

/**
 * __qdf_nbuf_move_frag_page_offset() - Move frag page_offset by offset
 * and adjust the length accordingly.
 * @nbuf: qdf_nbuf_t
 * @idx: Frag index
 * @offset: Frag page offset should be moved by offset
 * (+ve - move offset forward; -ve - move offset backward)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
					    int offset);

/**
 * __qdf_nbuf_add_rx_frag() - Add frag to nbuf at nr_frag index
 * @buf: Frag pointer needs to be added in nbuf frag
 * @nbuf: qdf_nbuf_t where frag will be added
 * @offset: Offset in frag to be added to nbuf_frags
 * @frag_len: Frag length
 * @truesize: truesize
 * @take_frag_ref: Whether to take ref for frag or not
 * This bool must be set as per below condition:
 * 1. False: If this frag is being added in any nbuf
 * for the first time after allocation.
 * 2. True: If frag is already attached as part of any
 * nbuf.
 *
 * It takes ref_count based on boolean flag take_frag_ref
 */
void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
			    int offset, int frag_len,
			    unsigned int truesize, bool take_frag_ref);

/**
 * __qdf_nbuf_ref_frag() - get frag reference
 * @buf: Frag pointer
 *
 * Return: void
 */
void __qdf_nbuf_ref_frag(qdf_frag_t buf);

/**
 * __qdf_nbuf_set_mark() - Set nbuf mark
 * @buf: Pointer to nbuf
 * @mark: Value to set mark
 *
 * Return: None
 */
static inline void __qdf_nbuf_set_mark(__qdf_nbuf_t buf, uint32_t mark)
{
	buf->mark = mark;
}

/**
 * __qdf_nbuf_get_mark() - Get nbuf mark
 * @buf: Pointer to nbuf
 *
 * Return: Value of mark
 */
static inline uint32_t __qdf_nbuf_get_mark(__qdf_nbuf_t buf)
{
	return buf->mark;
}

/**
 * __qdf_nbuf_get_data_len() - Return the size of the nbuf from
 * the data pointer to the end pointer
 * @nbuf: qdf_nbuf_t
 *
 * Return: size of skb from data pointer to end pointer
 */
static inline qdf_size_t __qdf_nbuf_get_data_len(__qdf_nbuf_t nbuf)
{
	return (skb_end_pointer(nbuf) - nbuf->data);
}

/**
 * __qdf_nbuf_get_gso_segs() - Return the number of gso segments
 * @skb: Pointer to network buffer
 *
 * Return: Return the number of gso segments
 */
static inline uint16_t __qdf_nbuf_get_gso_segs(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/**
 * __qdf_nbuf_net_timedelta() - get time delta
 * @t: time as qdf_ktime_t object
 *
 * Return: time delta as ktime_t object
 */
static inline qdf_ktime_t __qdf_nbuf_net_timedelta(qdf_ktime_t t)
{
	return net_timedelta(t);
}

#ifdef CONFIG_NBUF_AP_PLATFORM
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif

#endif /* _I_QDF_NET_BUF_H */