
/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf.h
 * This file provides OS dependent nbuf APIs.
 */

#ifndef _I_QDF_NBUF_H
#define _I_QDF_NBUF_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#include <asm/cacheflush.h>
#include <qdf_types.h>
#include <qdf_net_types.h>
#include <qdf_status.h>
#include <qdf_util.h>
#include <qdf_mem.h>
#include <linux/tcp.h>
/*
 * Use the Linux socket buffer (sk_buff) as the underlying nbuf
 * implementation. Linux uses sk_buff to represent both the packet
 * descriptor and its data, so sk_buff serves as the nbuf type here.
 */
typedef struct sk_buff *__qdf_nbuf_t;
/**
 * typedef __qdf_nbuf_queue_head_t - abstraction for sk_buff_head linux struct
 *
 * This is used for skb queue management via linux skb buff head APIs
 */
typedef struct sk_buff_head __qdf_nbuf_queue_head_t;

#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1

/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
 * max tx fragments added by the driver
 * The driver will always add one tx fragment (the tx descriptor)
 */
#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2

#define QDF_NBUF_CB_PACKET_TYPE_EAPOL  1
#define QDF_NBUF_CB_PACKET_TYPE_ARP    2
#define QDF_NBUF_CB_PACKET_TYPE_WAPI   3
#define QDF_NBUF_CB_PACKET_TYPE_DHCP   4
#define QDF_NBUF_CB_PACKET_TYPE_ICMP   5
#define QDF_NBUF_CB_PACKET_TYPE_ICMPv6 6

/* mark the first packet after wow wakeup */
#define QDF_MARK_FIRST_WAKEUP_PACKET   0x80000000

/*
 * Make sure that qdf_dma_addr_t in the cb block is always 64-bit aligned
 */
typedef union {
	uint64_t u64;
	qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
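
/*
 * Illustrative check (a sketch, not part of the original header): the u64
 * member keeps the union 64 bits wide even on targets where qdf_dma_addr_t
 * is only 32 bits, e.g.:
 *
 *	QDF_COMPILE_TIME_ASSERT(qdf_paddr_size, sizeof(qdf_paddr_t) == 8);
 */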
/**
 * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
 *                      - data passed between layers of the driver.
 *
 * Notes:
 *   1. Hard limited to 48 bytes. Please count your bytes
 *   2. The size of this structure has to be easily calculable and
 *      consistently so: do not use any conditional compile flags
 *   3. Split into a common part followed by a tx/rx overlay
 *   4. There is only one extra frag, which represents the HTC/HTT header
 *   5. "ext_cb_ptr" must be the first member in both TX and RX unions
 *      for the priv_cb_w since it must be at same offset for both
 *      TX and RX union
 *   6. "ipa.owned" bit must be first member in both TX and RX unions
 *      for the priv_cb_m since it must be at same offset for both
 *      TX and RX union.
 *
 * @paddr: physical address retrieved by dma_map of nbuf->data
 *
 * @rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 * @rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @rx.dev.priv_cb_w.msdu_len: length of RX packet
 * @rx.dev.priv_cb_w.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet type
 * @rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
 *
 * @rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
 * @rx.dev.priv_cb_m.flush_ind: flush indication
 * @rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
 * @rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
 * @rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
 * @rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
 * @rx.dev.priv_cb_m.lro_ctx: LRO context
 * @rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
 * @rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
 * @rx.dev.priv_cb_m.dp.wifi2.map_index:
 * @rx.dev.priv_cb_m.ipa_owned: packet owned by IPA
 *
 * @rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
 * @rx.tcp_proto: L4 protocol is TCP
 * @rx.tcp_pure_ack: A TCP ACK packet with no payload
 * @rx.ipv6_proto: L3 protocol is IPv6
 * @rx.ip_offset: offset to IP header
 * @rx.tcp_offset: offset to TCP header
 * @rx.rx_ctx_id: Rx context id
 * @rx.num_elements_in_list: number of elements in the nbuf list
 *
 * @rx.tcp_udp_chksum: L4 payload checksum
 * @rx.tcp_win: TCP window size
 *
 * @rx.flow_id: 32bit flow id
 *
 * @rx.flag_chfrag_start: first MSDU in an AMSDU
 * @rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @rx.flag_chfrag_end: last MSDU in an AMSDU
 * @rx.flag_retry: flag to indicate MSDU is retried
 * @rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
 * @rx.flag_da_valid: flag to indicate DA is valid for RX packet
 * @rx.flag_sa_valid: flag to indicate SA is valid for RX packet
 * @rx.flag_is_frag: flag to indicate skb has frag list
 * @rx.rsrvd: reserved
 *
 * @rx.trace: combined structure for DP and protocol trace
 * @rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			    (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @rx.trace.dp_trace: flag (Datapath trace)
 * @rx.trace.packet_track: RX_DATA packet
 * @rx.trace.rsrvd: reserved
 *
 * @rx.vdev_id: vdev_id for RX pkt
 * @rx.is_raw_frame: RAW frame
 * @rx.fcs_err: FCS error
 * @rx.tid_val: tid value
 * @rx.reserved: reserved
 * @rx.ftype: mcast2ucast, TSO, SG, MESH
 *
 * @tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
 * @tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
 *
 * @tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
 *		(1) CE classification enablement bit
 *		(2) packet type (802.3 or Ethernet type II)
 *		(3) packet offset (usually length of HTC/HTT descr)
 * @tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
 * @tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
 * @tx.dev.priv_cb_m.mgmt_desc_id: mgmt descriptor for tx completion cb
 * @tx.dev.priv_cb_m.dma_option.bi_map: flag to do bi-direction dma map
 * @tx.dev.priv_cb_m.dma_option.reserved: reserved bits for future use
 * @tx.dev.priv_cb_m.reserved: reserved
 *
 * @tx.ftype: mcast2ucast, TSO, SG, MESH
 * @tx.vdev_id: vdev (for protocol trace)
 * @tx.len: length of efrag pointed by the above pointers
 *
 * @tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
 * @tx.flags.bits.num: number of extra frags (0 or 1)
 * @tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
 * @tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
 * @tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
 * @tx.flags.bits.flag_ext_header: extended flags
 * @tx.flags.bits.reserved: reserved
 *
 * @tx.trace: combined structure for DP and protocol trace
 * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *			    (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @tx.trace.is_packet_priv:
 * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
 * @tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *			 (MGMT_ACTION)] - 4 bits
 * @tx.trace.dp_trace: flag (Datapath trace)
 * @tx.trace.is_bcast: flag (Broadcast packet)
 * @tx.trace.is_mcast: flag (Multicast packet)
 * @tx.trace.packet_type: flag (Packet type)
 * @tx.trace.htt2_frm: flag (high-latency path only)
 * @tx.trace.print: enable packet logging
 *
 * @tx.vaddr: virtual address of the extra tx fragment
 * @tx.paddr: physical/DMA address of the extra tx fragment
 */
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
					uint16_t msdu_len;
					uint16_t peer_id;
					uint16_t protocol_tag;
					uint16_t flow_tag;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					uint32_t ipa_owned:1,
						 peer_cached_buf_frm:1,
						 flush_ind:1,
						 packet_buf_pool:1,
						 l3_hdr_pad:3,
						 reserved:9,
						 reserved1:16;
					uint32_t tcp_seq_num;
					uint32_t tcp_ack_num;
					union {
						struct {
							uint16_t msdu_len;
							uint16_t peer_id;
						} wifi3;
						struct {
							uint32_t map_index;
						} wifi2;
					} dp;
					unsigned char *lro_ctx;
				} priv_cb_m;
			} dev;
			uint32_t lro_eligible:1,
				 tcp_proto:1,
				 tcp_pure_ack:1,
				 ipv6_proto:1,
				 ip_offset:7,
				 tcp_offset:7,
				 rx_ctx_id:4,
				 fcs_err:1,
				 is_raw_frame:1,
				 num_elements_in_list:8;
			uint32_t tcp_udp_chksum:16,
				 tcp_win:16;
			uint32_t flow_id;
			uint8_t flag_chfrag_start:1,
				flag_chfrag_cont:1,
				flag_chfrag_end:1,
				flag_retry:1,
				flag_da_mcbc:1,
				flag_da_valid:1,
				flag_sa_valid:1,
				flag_is_frag:1;
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:4,
					rsrvd:3;
			} trace;
			uint16_t vdev_id:8,
				 tid_val:4,
				 ftype:4;
		} rx;

		/* Note: MAX: 40 bytes */
		struct {
			union {
				struct {
					void *ext_cb_ptr;
					void *fctx;
				} priv_cb_w;
				struct {
					/* ipa_owned bit is common between rx
					 * control block and tx control block.
					 * Do not change location of this bit.
					 */
					struct {
						uint32_t owned:1,
							 priv:31;
					} ipa;
					uint32_t data_attr;
					uint16_t desc_id;
					uint16_t mgmt_desc_id;
					struct {
						uint8_t bi_map:1,
							reserved:7;
					} dma_option;
					uint8_t reserved[3];
				} priv_cb_m;
			} dev;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						flag_notify_comp:1;
				} bits;
				uint8_t u8;
			} flags;
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:4,
					proto_type:4;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:3,
					/* used only for hl */
					htt2_frm:1,
					print:1;
			} trace;
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */

QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
			(sizeof(struct qdf_nbuf_cb)) <=
			FIELD_SIZEOF(struct sk_buff, cb));
/*
 * Access macros for qdf_nbuf_cb.
 * Note: These macros can be used as L-values as well as R-values.
 *       When used as R-values, they effectively function as "get" macros.
 *       When used as L-values, they effectively function as "set" macros.
 */
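
/*
 * Example (illustrative only): since these macros expand to struct member
 * accesses, the same macro serves as both getter and setter:
 *
 *	QDF_NBUF_CB_RX_CTX_ID(skb) = 2;			  "set" (L-value)
 *	uint8_t ctx = QDF_NBUF_CB_RX_CTX_ID(skb);	  "get" (R-value)
 */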
#define QDF_NBUF_CB_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)

#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_mcbc)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.is_raw_frame)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.tid_val)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.fcs_err)

#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
	qdf_nbuf_set_state(skb, PACKET_STATE)

#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)

/* Tx Flags Accessor Macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.flag_notify_comp)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
/* End of Tx Flags Accessor Macros */

/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */

#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)

/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)

#define __qdf_nbuf_reset_num_frags(skb) \
	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
/* end of nbuf->cb access macros */

typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);

#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)

#define __qdf_nbuf_mapped_paddr_set(skb, paddr) \
	(QDF_NBUF_CB_PADDR(skb) = paddr)

#define __qdf_nbuf_frag_push_head( \
	skb, frag_len, frag_vaddr, frag_paddr) \
	do { \
		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \
		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \
		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \
		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \
	} while (0)
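
/*
 * Usage sketch (hypothetical htt_desc_* values): a driver pushes its
 * HTC/HTT descriptor as the single extra fragment in front of the skb
 * payload; the frag accessors below then treat the extra fragment as
 * frag 0 and the skb data as the following frag:
 *
 *	__qdf_nbuf_frag_push_head(skb, htt_desc_len,
 *				  htt_desc_vaddr, htt_desc_paddr);
 */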
#define __qdf_nbuf_get_frag_vaddr(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))

#define __qdf_nbuf_get_frag_vaddr_always(skb) \
	QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)

#define __qdf_nbuf_get_frag_paddr(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \
	 /* assume that the OS only provides a single fragment */ \
	 QDF_NBUF_CB_PADDR(skb))

#define __qdf_nbuf_get_tx_frag_paddr(skb) QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb)

#define __qdf_nbuf_get_frag_len(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)

#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \
	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))

#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \
	do { \
		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS; \
		if (frag_num) \
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = \
							      is_wstrm; \
		else \
			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = \
							      is_wstrm; \
	} while (0)
#define __qdf_nbuf_set_vdev_ctx(skb, vdev_id) \
	do { \
		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_id); \
	} while (0)

#define __qdf_nbuf_get_vdev_ctx(skb) \
	QDF_NBUF_CB_TX_VDEV_CTX((skb))

#define __qdf_nbuf_set_tx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_tx_ftype(skb) \
	QDF_NBUF_CB_TX_FTYPE((skb))

#define __qdf_nbuf_set_rx_ftype(skb, type) \
	do { \
		QDF_NBUF_CB_RX_FTYPE((skb)) = (type); \
	} while (0)

#define __qdf_nbuf_get_rx_ftype(skb) \
	QDF_NBUF_CB_RX_FTYPE((skb))

#define __qdf_nbuf_set_rx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_start(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_START((skb)))

#define __qdf_nbuf_set_rx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_RX_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_rx_chfrag_cont(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_rx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_RX_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_rx_chfrag_end(skb) \
	(QDF_NBUF_CB_RX_CHFRAG_END((skb)))

#define __qdf_nbuf_set_da_mcbc(skb, val) \
	((QDF_NBUF_CB_RX_DA_MCBC((skb))) = val)

#define __qdf_nbuf_is_da_mcbc(skb) \
	(QDF_NBUF_CB_RX_DA_MCBC((skb)))

#define __qdf_nbuf_set_da_valid(skb, val) \
	((QDF_NBUF_CB_RX_DA_VALID((skb))) = val)

#define __qdf_nbuf_is_da_valid(skb) \
	(QDF_NBUF_CB_RX_DA_VALID((skb)))

#define __qdf_nbuf_set_sa_valid(skb, val) \
	((QDF_NBUF_CB_RX_SA_VALID((skb))) = val)

#define __qdf_nbuf_is_sa_valid(skb) \
	(QDF_NBUF_CB_RX_SA_VALID((skb)))

#define __qdf_nbuf_set_rx_retry_flag(skb, val) \
	((QDF_NBUF_CB_RX_RETRY_FLAG((skb))) = val)

#define __qdf_nbuf_is_rx_retry_flag(skb) \
	(QDF_NBUF_CB_RX_RETRY_FLAG((skb)))

#define __qdf_nbuf_set_raw_frame(skb, val) \
	((QDF_NBUF_CB_RX_RAW_FRAME((skb))) = val)

#define __qdf_nbuf_is_raw_frame(skb) \
	(QDF_NBUF_CB_RX_RAW_FRAME((skb)))

#define __qdf_nbuf_get_tid_val(skb) \
	(QDF_NBUF_CB_RX_TID_VAL((skb)))

#define __qdf_nbuf_set_tid_val(skb, val) \
	((QDF_NBUF_CB_RX_TID_VAL((skb))) = val)

#define __qdf_nbuf_set_is_frag(skb, val) \
	((QDF_NBUF_CB_RX_IS_FRAG((skb))) = val)

#define __qdf_nbuf_is_frag(skb) \
	(QDF_NBUF_CB_RX_IS_FRAG((skb)))

#define __qdf_nbuf_set_tx_chfrag_start(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_start(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))

#define __qdf_nbuf_set_tx_chfrag_cont(skb, val) \
	do { \
		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb))) = val; \
	} while (0)

#define __qdf_nbuf_is_tx_chfrag_cont(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT((skb)))

#define __qdf_nbuf_set_tx_chfrag_end(skb, val) \
	((QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val)

#define __qdf_nbuf_is_tx_chfrag_end(skb) \
	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))

#define __qdf_nbuf_trace_set_proto_type(skb, proto_type) \
	(QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))

#define __qdf_nbuf_trace_get_proto_type(skb) \
	QDF_NBUF_CB_TX_PROTO_TYPE(skb)

#define __qdf_nbuf_data_attr_get(skb) \
	QDF_NBUF_CB_TX_DATA_ATTR(skb)

#define __qdf_nbuf_data_attr_set(skb, data_attr) \
	(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))

#define __qdf_nbuf_queue_walk_safe(queue, var, tvar) \
	skb_queue_walk_safe(queue, var, tvar)

/**
 * __qdf_nbuf_num_frags_init() - init extra frags
 * @skb: sk buffer
 *
 * Return: none
 */
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
/**
 * __qdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to start with
 * @align: Align
 * @prio: Priority
 * @func: Function name of the call site
 * @line: line number of the call site
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space
 * in the front. Since the reserve is done after alignment, an unaligned
 * reserve value will result in an unaligned data address.
 *
 * Return: nbuf or %NULL if no memory
 */
__qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
		 int prio, const char *func, uint32_t line);
void __qdf_nbuf_free(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
			  struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap(__qdf_device_t osdev,
		      struct sk_buff *skb, qdf_dma_dir_t dir);
QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
				 struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
			     struct sk_buff *skb, qdf_dma_dir_t dir);
void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr);

QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
				 qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
			     qdf_dma_dir_t dir, int nbytes);

void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
			     qdf_dma_dir_t dir);

void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg);
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag);
void qdf_nbuf_classify_pkt(struct sk_buff *skb);

bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb);
bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb);
bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data);
bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data);
bool __qdf_nbuf_is_bcast_pkt(__qdf_nbuf_t nbuf);
bool __qdf_nbuf_data_is_arp_req(uint8_t *data);
bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data);
uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len);
bool __qdf_nbuf_data_is_dns_query(uint8_t *data);
bool __qdf_nbuf_data_is_dns_response(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data);
bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data);
uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data);
bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data);
uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_eapol_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_arp_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_icmp_subtype(uint8_t *data);
enum qdf_proto_subtype __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv4_proto(uint8_t *data);
uint8_t __qdf_nbuf_data_get_ipv6_proto(uint8_t *data);

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void);
void __qdf_nbuf_count_inc(struct sk_buff *skb);
void __qdf_nbuf_count_dec(struct sk_buff *skb);
void __qdf_nbuf_mod_init(void);
void __qdf_nbuf_mod_exit(void);

#else

static inline int __qdf_nbuf_count_get(void)
{
	return 0;
}

static inline void __qdf_nbuf_count_inc(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_count_dec(struct sk_buff *skb)
{
}

static inline void __qdf_nbuf_mod_init(void)
{
}

static inline void __qdf_nbuf_mod_exit(void)
{
}
#endif
/**
 * __qdf_to_status() - OS to QDF status conversion
 * @error: OS error
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_to_status(signed int error)
{
	switch (error) {
	case 0:
		return QDF_STATUS_SUCCESS;
	case ENOMEM:
	case -ENOMEM:
		return QDF_STATUS_E_NOMEM;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}
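
/*
 * For example, per the switch above:
 *
 *	__qdf_to_status(0)	 == QDF_STATUS_SUCCESS
 *	__qdf_to_status(-ENOMEM) == QDF_STATUS_E_NOMEM
 *	__qdf_to_status(-EINVAL) == QDF_STATUS_E_NOSUPPORT (default case)
 */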
/**
 * __qdf_nbuf_len() - return the amount of valid data in the skb
 * @skb: Pointer to network buffer
 *
 * This API returns the amount of valid data in the skb. If a driver-added
 * extra frag is present, its length is included in the total.
 *
 * Return: network buffer length
 */
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	int i, extra_frag_len = 0;

	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
	if (i > 0)
		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return extra_frag_len + skb->len;
}
/**
 * __qdf_nbuf_cat() - link two nbufs
 * @dst: Buffer to piggyback into
 * @src: Buffer to put
 *
 * Concatenate two nbufs: the new buffer (src) is piggybacked into the
 * older one (dst). It is the caller's responsibility to free the src skb.
 *
 * Return: QDF_STATUS (status of the call)
 */
static inline QDF_STATUS
__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS error = 0;

	qdf_assert(dst && src);

	/*
	 * Since pskb_expand_head unconditionally reallocates the skb->head
	 * buffer, first check whether the current buffer is already large
	 * enough.
	 */
	if (skb_tailroom(dst) < src->len) {
		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
		if (error)
			return __qdf_to_status(error);
	}

	memcpy(skb_tail_pointer(dst), src->data, src->len);
	skb_put(dst, src->len);

	return __qdf_to_status(error);
}
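
/*
 * Usage sketch (hypothetical dst/src buffers): since dst holds a copy of
 * the data after a successful call and src is never freed here, a caller
 * typically releases src itself:
 *
 *	if (__qdf_nbuf_cat(dst, src) == QDF_STATUS_SUCCESS)
 *		__qdf_nbuf_free(src);
 */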
/*
 * nbuf manipulation routines
 */

/**
 * __qdf_nbuf_headroom() - return the amount of head space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of head room
 */
static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
{
	return skb_headroom(skb);
}

/**
 * __qdf_nbuf_tailroom() - return the amount of tail space available
 * @skb: Pointer to network buffer
 *
 * Return: amount of tail room
 */
static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
{
	return skb_tailroom(skb);
}
/**
 * __qdf_nbuf_put_tail() - Puts data in the end
 * @skb: Pointer to network buffer
 * @size: size to be pushed
 *
 * Return: data pointer of this buf where new data has to be
 *         put, or NULL if there is not enough room in this buf
 *         (in which case the skb is freed).
 */
static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
{
	if (skb_tailroom(skb) < size) {
		if (unlikely(pskb_expand_head(skb, 0,
			size - skb_tailroom(skb), GFP_ATOMIC))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}
	}
	return skb_put(skb, size);
}
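
/*
 * Usage sketch (hypothetical trailer): the skb is freed internally when
 * the tail cannot be extended, so the caller must check for NULL before
 * writing through the returned pointer:
 *
 *	uint8_t *tail = __qdf_nbuf_put_tail(skb, sizeof(trailer));
 *
 *	if (tail)
 *		memcpy(tail, &trailer, sizeof(trailer));
 */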
/**
 * __qdf_nbuf_trim_tail() - trim data out from the end
 * @skb: Pointer to network buffer
 * @size: size to be popped
 *
 * Return: none
 */
static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
{
	return skb_trim(skb, skb->len - size);
}

/*
 * prototypes. Implemented in qdf_nbuf.c
 */
qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
				   qdf_nbuf_rx_cksum_t *cksum);
uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
void __qdf_nbuf_ref(struct sk_buff *skb);
int __qdf_nbuf_shared(struct sk_buff *skb);

/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 */
#define __qdf_nbuf_pool_delete(osdev)
/**
 * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
 * @skb: Pointer to network buffer
 *
 * If GFP_ATOMIC is overkill, we could instead check whether we are
 * called from interrupt context and use GFP_ATOMIC only then, falling
 * back to GFP_KERNEL in the normal case.
 *
 * For example, use "in_irq() || irqs_disabled()".
 *
 * Return: cloned skb
 */
static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}
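
/*
 * A minimal sketch of the GFP selection suggested above (an assumption,
 * not what this inline actually does):
 *
 *	gfp_t gfp = (in_irq() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
 *	struct sk_buff *skb_new = skb_clone(skb, gfp);
 */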
/**
 * __qdf_nbuf_copy() - returns a private copy of the skb
 * @skb: Pointer to network buffer
 *
 * This API returns a private copy of the skb; the skb returned is
 * completely modifiable by callers.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
{
	struct sk_buff *skb_new = NULL;

	skb_new = skb_copy(skb, GFP_ATOMIC);
	if (skb_new)
		__qdf_nbuf_count_inc(skb_new);

	return skb_new;
}

#define __qdf_nbuf_reserve	skb_reserve

/**
 * __qdf_nbuf_set_data_pointer() - set buffer data pointer
 * @skb: Pointer to network buffer
 * @data: data pointer
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_data_pointer(struct sk_buff *skb, uint8_t *data)
{
	skb->data = data;
}

/**
 * __qdf_nbuf_set_len() - set buffer data length
 * @skb: Pointer to network buffer
 * @len: data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_len(struct sk_buff *skb, uint32_t len)
{
	skb->len = len;
}

/**
 * __qdf_nbuf_set_tail_pointer() - set buffer data tail pointer
 * @skb: Pointer to network buffer
 * @len: skb data length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_tail_pointer(struct sk_buff *skb, int len)
{
	skb_set_tail_pointer(skb, len);
}

/**
 * __qdf_nbuf_unlink_no_lock() - unlink an skb from skb queue
 * @skb: Pointer to network buffer
 * @list: list to use
 *
 * This is a lockless version; the driver must acquire locks if it
 * needs to synchronize.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_unlink(skb, list);
}
/**
 * __qdf_nbuf_reset() - reset the buffer data and pointer
 * @skb: Network buf instance
 * @reserve: reserve
 * @align: align
 *
 * Return: none
 */
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	int offset;

	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	skb_reserve(skb, reserve);
}
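
/*
 * Worked example for the alignment step above (illustrative numbers):
 * with align = 4 and skb->data at an address ending in 0x2, offset = 2,
 * so skb_reserve(skb, 2) advances skb->data to the next 4-byte boundary
 * before the caller's requested headroom is reserved.
 */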
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
 * __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
 *                                         buffer in kernel
 *
 * Return: true if dev_scratch is supported
 *         false if dev_scratch is not supported
 */
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return true;
}

/**
 * __qdf_nbuf_get_dev_scratch() - get dev_scratch of network buffer
 * @skb: Pointer to network buffer
 *
 * Return: dev_scratch if dev_scratch supported
 *         0 if dev_scratch not supported
 */
static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return skb->dev_scratch;
}

/**
 * __qdf_nbuf_set_dev_scratch() - set dev_scratch of network buffer
 * @skb: Pointer to network buffer
 * @value: value to be set in dev_scratch of network buffer
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
	skb->dev_scratch = value;
}
#else
static inline bool __qdf_nbuf_is_dev_scratch_supported(void)
{
	return false;
}

static inline unsigned long __qdf_nbuf_get_dev_scratch(struct sk_buff *skb)
{
	return 0;
}

static inline void
__qdf_nbuf_set_dev_scratch(struct sk_buff *skb, unsigned long value)
{
}
#endif /* KERNEL_VERSION(4, 14, 0) */
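
/*
 * Usage sketch (hypothetical cookie value): callers can stash a
 * per-buffer cookie only when the running kernel provides
 * skb->dev_scratch, and must tolerate the no-op fallback otherwise:
 *
 *	if (__qdf_nbuf_is_dev_scratch_supported())
 *		__qdf_nbuf_set_dev_scratch(skb, cookie);
 */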
/**
 * __qdf_nbuf_head() - return the pointer to the skb's head
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to head buffer
 */
static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
{
	return skb->head;
}
/**
 * __qdf_nbuf_data() - return the pointer to data header in the skb
 * @skb: Pointer to network buffer
 *
 * Return: Pointer to skb data
 */
static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
{
	return skb->data;
}

static inline uint8_t *__qdf_nbuf_data_addr(struct sk_buff *skb)
{
	return (uint8_t *)&skb->data;
}

/**
 * __qdf_nbuf_get_protocol() - return the protocol value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb protocol
 */
static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
{
	return skb->protocol;
}

/**
 * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb ip_summed
 */
static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
{
	return skb->ip_summed;
}

/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
					    uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}

/**
 * __qdf_nbuf_get_priority() - return the priority value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: skb priority
 */
static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
{
	return skb->priority;
}

/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}

/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}

/**
 * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
 * @skb: Current skb
 *
 * Return: the next skb pointed to by the current skb
 */
static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
 * @skb_head: head_buf nbuf holding head segment (single)
 * @ext_list: nbuf list holding linked extensions to the head
 * @ext_len: Total length of all buffers in the extension list
 *
 * This function is used to link up a list of packet extensions (seg1, 2, ...)
 * to the nbuf holding the head segment (seg0)
 *
 * Return: none
 */
static inline void
__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
			   struct sk_buff *ext_list, size_t ext_len)
{
	skb_shinfo(skb_head)->frag_list = ext_list;
	skb_head->data_len = ext_len;
	skb_head->len += skb_head->data_len;
}
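
/*
 * Usage sketch (hypothetical seg0/seg1/seg2 buffers): chain two extension
 * segments behind the head segment; ext_len must be the total length of
 * all buffers in the extension list so that seg0's len covers the whole
 * packet:
 *
 *	__qdf_nbuf_set_next_ext(seg1, seg2);
 *	__qdf_nbuf_append_ext_list(seg0, seg1, seg1->len + seg2->len);
 */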
/**
 * __qdf_nbuf_get_ext_list() - Get the link to extended nbuf list.
 * @head_buf: Network buf holding head segment (single)
 *
 * This ext_list is populated when we have a jumbo packet, for example in
 * the case of monitor mode AMSDU packet reception; segments are stitched
 * together using the frag_list.
 *
 * Return: Network buf list holding linked extensions from head buf.
 */
static inline struct sk_buff *__qdf_nbuf_get_ext_list(struct sk_buff *head_buf)
{
	return (skb_shinfo(head_buf)->frag_list);
}
/**
 * __qdf_nbuf_get_age() - return the checksum value of the skb
 * @skb: Pointer to network buffer
 *
 * Return: checksum value
 */
static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
{
	return skb->csum;
}

/**
 * __qdf_nbuf_set_age() - sets the checksum value of the skb
 * @skb: Pointer to network buffer
 * @v: Value
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}

/**
 * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}
/**
 * __qdf_nbuf_copy_bits() - copy data bits out of the skb
 * @skb: Pointer to network buffer
 * @offset: Offset value
 * @len: Length
 * @to: Destination pointer
 *
 * Return: 0 on success, negative error code on failure
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	return skb_copy_bits(skb, offset, to, len);
}
  1241. /**
  1242. * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
  1243. * @skb: Pointer to network buffer
  1244. * @len: Packet length
  1245. *
  1246. * Return: none
  1247. */
  1248. static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
  1249. {
  1250. if (skb->len > len) {
  1251. skb_trim(skb, len);
  1252. } else {
  1253. if (skb_tailroom(skb) < len - skb->len) {
  1254. if (unlikely(pskb_expand_head(skb, 0,
  1255. len - skb->len - skb_tailroom(skb),
  1256. GFP_ATOMIC))) {
  1257. dev_kfree_skb_any(skb);
  1258. qdf_assert(0);
  1259. }
  1260. }
  1261. skb_put(skb, (len - skb->len));
  1262. }
  1263. }
/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}

#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	(QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))

#define __qdf_nbuf_get_tx_htt2_frm(skb) \
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)

void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
			  uint32_t *lo, uint32_t *hi);

uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
				 struct qdf_tso_info_t *tso_info);

void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
				  struct qdf_tso_seg_elem_t *tso_seg,
				  bool is_last_seg);

#ifdef FEATURE_TSO
/**
 * __qdf_nbuf_get_tcp_payload_len() - return the TCP payload length of the skb
 * @skb: buffer
 *
 * Return: TCP payload length in bytes
 */
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb);

uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);

#else
static inline
size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
{
	return 0;
}

static inline uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
	return 0;
}
#endif /* FEATURE_TSO */

static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
{
	if (skb_is_gso(skb) &&
	    (skb_is_gso_v6(skb) ||
	     (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
		return true;
	else
		return false;
}

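/*
 * Usage sketch (illustrative only; tx_nbuf and num_seg are hypothetical
 * caller-side names): a tx path can branch into TSO handling only for GSO
 * TCP packets.
 *
 *	if (__qdf_nbuf_is_tso(tx_nbuf))
 *		num_seg = __qdf_nbuf_get_tso_num_seg(tx_nbuf);
 */
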
struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);

int __qdf_nbuf_get_users(struct sk_buff *skb);

/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			      and get hw_classify by peeking
 *			      into the packet
 * @nbuf: Network buffer (skb on Linux)
 * @pkt_type: Pkt type (from enum htt_pkt_type)
 * @pkt_subtype: Bit 4 of this field in the HTT descriptor needs to be set
 *		 when CE classification is supported; this macro sets it.
 * @hw_classify: Flag set to indicate that CE classification is enabled.
 *		 Do not set this bit for VLAN packets or for mcast/bcast
 *		 frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data, e.g.
 * whether to enable the tx_classify bit in the CE.
 *
 * Overrides pkt_type only if required for 802.3 frames (original Ethernet).
 * If the protocol field is less than ETH_P_802_3_MIN (0x600), it is a length
 * and the frame is an 802.3 frame; otherwise it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit.
 *
 * Return: void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
			       pkt_subtype, hw_classify)	\
do {								\
	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
	uint16_t ether_type = ntohs(eh->h_proto);		\
	bool is_mc_bc;						\
								\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
		   is_multicast_ether_addr((uint8_t *)eh);	\
								\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
		hw_classify = 1;				\
		pkt_subtype = 0x01 << HTT_TX_CLASSIFY_BIT_S;	\
	}							\
								\
	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
		pkt_type = htt_pkt_type_ethernet;		\
								\
} while (0)

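/*
 * Usage sketch (illustrative only; tx_skb and the initial pkt_type value
 * are hypothetical caller-side choices): classify an outgoing frame before
 * filling the HTT tx descriptor.
 *
 *	uint8_t pkt_type = htt_pkt_type_ethernet;
 *	uint8_t pkt_subtype = 0;
 *	uint8_t hw_classify = 0;
 *
 *	__qdf_nbuf_tx_info_get(tx_skb, pkt_type, pkt_subtype, hw_classify);
 *	// hw_classify/pkt_subtype now indicate whether CE classification
 *	// should be requested for this frame
 */
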
/*
 * nbuf private buffer routines
 */

/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Pointer to store header's addr
 * @len: Pointer to store the network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}

/**
 * typedef __qdf_nbuf_queue_t - network buffer queue
 * @head: Head pointer
 * @tail: Tail pointer
 * @qlen: Queue length
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;

/****************** Functions ******************/

/**
 * __qdf_nbuf_queue_init() - initialize the queue head
 * @qhead: Queue head
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
{
	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_add() - add an skb at the tail of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version; the driver must acquire locks if it
 * needs to synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
{
	skb->next = NULL; /* Nullify the next ptr */
	if (!qhead->head)
		qhead->head = skb;
	else
		qhead->tail->next = skb;
	qhead->tail = skb;
	qhead->qlen++;
}

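/*
 * Usage sketch (illustrative only; pending_q and nbuf are hypothetical
 * caller-side names): build a simple FIFO of buffers. Callers that share
 * the queue across contexts must provide their own locking.
 *
 *	__qdf_nbuf_queue_t pending_q;
 *
 *	__qdf_nbuf_queue_init(&pending_q);
 *	__qdf_nbuf_queue_add(&pending_q, nbuf);
 */
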
/**
 * __qdf_nbuf_queue_append() - Append src list at the end of dest list
 * @dest: target netbuf queue
 * @src: source netbuf queue
 *
 * Return: target netbuf queue
 */
static inline __qdf_nbuf_queue_t *
__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
{
	if (!dest)
		return NULL;
	else if (!src || !(src->head))
		return dest;

	if (!(dest->head))
		dest->head = src->head;
	else
		dest->tail->next = src->head;

	dest->tail = src->tail;
	dest->qlen += src->qlen;
	return dest;
}

/**
 * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
 * @qhead: Queue head
 * @skb: Pointer to network buffer
 *
 * This is a lockless version; the driver must acquire locks if it needs to
 * synchronize
 *
 * Return: none
 */
static inline void
__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
{
	if (!qhead->head) {
		/* Empty queue: tail pointer must be updated */
		qhead->tail = skb;
	}
	skb->next = qhead->head;
	qhead->head = skb;
	qhead->qlen++;
}

/**
 * __qdf_nbuf_queue_remove() - remove an skb from the head of the queue
 * @qhead: Queue head
 *
 * This is a lockless version. The driver should take care of the locks
 *
 * Return: skb or NULL
 */
static inline
struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t tmp = NULL;

	if (qhead->head) {
		qhead->qlen--;
		tmp = qhead->head;
		if (qhead->head == qhead->tail) {
			qhead->head = NULL;
			qhead->tail = NULL;
		} else {
			qhead->head = tmp->next;
		}
		tmp->next = NULL;
	}
	return tmp;
}

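/*
 * Usage sketch (illustrative only; pending_q and deliver_nbuf() are
 * hypothetical caller-side names): drain the queue in FIFO order.
 *
 *	struct sk_buff *nbuf;
 *
 *	while ((nbuf = __qdf_nbuf_queue_remove(&pending_q)))
 *		deliver_nbuf(nbuf);
 */
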
/**
 * __qdf_nbuf_queue_free() - free a queue
 * @qhead: head of queue
 *
 * Return: QDF status
 */
static inline QDF_STATUS
__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
{
	__qdf_nbuf_t buf = NULL;

	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
		__qdf_nbuf_free(buf);
	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_nbuf_queue_first() - returns the first skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
{
	return qhead->head;
}

/**
 * __qdf_nbuf_queue_last() - returns the last skb in the queue
 * @qhead: head of queue
 *
 * Return: NULL if the queue is empty
 */
static inline struct sk_buff *
__qdf_nbuf_queue_last(__qdf_nbuf_queue_t *qhead)
{
	return qhead->tail;
}

/**
 * __qdf_nbuf_queue_len() - return the queue length
 * @qhead: Queue head
 *
 * Return: Queue length
 */
static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen;
}

/**
 * __qdf_nbuf_queue_next() - return the next skb from packet chain
 * @skb: Pointer to network buffer
 *
 * This API returns the next skb from the packet chain; note that the skb
 * remains in the queue
 *
 * Return: NULL if no packets are there
 */
static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
{
	return skb->next;
}

/**
 * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
 * @qhead: Queue head
 *
 * Return: true if length is 0 else false
 */
static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
{
	return qhead->qlen == 0;
}

/*
 * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
 * Because the queue head will most likely be put in some structure,
 * we don't use a pointer type as the definition.
 */

static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}

/**
 * __qdf_nbuf_realloc_headroom() - expand the headroom while keeping the
 *				   skb shell intact
 * @skb: sk buff
 * @headroom: size of headroom
 *
 * Expands the headroom in the data region. In case of failure the skb is
 * released.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
{
	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
	}
	return skb;
}

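/*
 * Usage sketch (illustrative only; ENCAP_HDR_LEN stands in for whatever
 * encap header length the caller must prepend): make room for an extra
 * header before pushing it.
 *
 *	if (skb_headroom(skb) < ENCAP_HDR_LEN) {
 *		skb = __qdf_nbuf_realloc_headroom(skb, ENCAP_HDR_LEN);
 *		if (!skb)
 *			return QDF_STATUS_E_NOMEM;
 *	}
 *	skb_push(skb, ENCAP_HDR_LEN);
 */
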
/**
 * __qdf_nbuf_realloc_tailroom() - expand the tailroom while keeping the
 *				   skb shell intact
 * @skb: sk buff
 * @tailroom: size of tailroom
 *
 * Expands the tailroom in the data region. In case of failure it releases
 * the skb.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
		return skb;
	/* unlikely path */
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * __qdf_nbuf_linearize() - skb linearize
 * @skb: sk buff
 *
 * If the nbuf is non-linear, this function linearizes it, i.e. it copies
 * the data held in fragments into the skb's linear data area.
 *
 * Return: 0 on success, -ENOMEM on failure
 */
static inline int
__qdf_nbuf_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}

/**
 * __qdf_nbuf_unshare() - skb unshare
 * @skb: sk buff
 *
 * Creates a version of the specified nbuf whose contents can be safely
 * modified without affecting other users. If the nbuf is a clone, this
 * function creates a new copy of the data. If the buffer is not a clone,
 * the original buffer is returned.
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_unshare(struct sk_buff *skb)
{
	return skb_unshare(skb, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	return skb_cloned(skb);
}

/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * Return: QDF status
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}

/*
 * __qdf_nbuf_pool_delete() implementation - do nothing in Linux
 */
#define __qdf_nbuf_pool_delete(osdev)

/**
 * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
 *			 release the skb.
 * @skb: sk buff
 * @headroom: size of headroom
 * @tailroom: size of tailroom
 *
 * Return: skb or NULL
 */
static inline struct sk_buff *
__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
{
	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
		return skb;

	dev_kfree_skb_any(skb);
	return NULL;
}

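/*
 * Usage sketch (illustrative only; the pad sizes are hypothetical): reserve
 * space at both ends in a single reallocation instead of separate headroom
 * and tailroom calls.
 *
 *	skb = __qdf_nbuf_expand(skb, 64, 128);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */
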
/**
 * __qdf_nbuf_copy_expand() - copy and expand nbuf
 * @buf: Network buf instance
 * @headroom: Additional headroom to be added
 * @tailroom: Additional tailroom to be added
 *
 * Return: New nbuf that is a copy of buf, with additional head and tail room,
 *	   or NULL if there is no memory
 */
static inline struct sk_buff *
__qdf_nbuf_copy_expand(struct sk_buff *buf, int headroom, int tailroom)
{
	return skb_copy_expand(buf, headroom, tailroom, GFP_ATOMIC);
}

/**
 * __qdf_nbuf_get_ref_fraglist() - get reference to fragments
 * @buf: Network buf instance
 *
 * Return: void
 */
static inline void
__qdf_nbuf_get_ref_fraglist(struct sk_buff *buf)
{
	struct sk_buff *list;

	skb_walk_frags(buf, list)
		skb_get(list);
}

/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: Pointer to network buffer
 * @hdr_off: Pointer for the checksum header offset (unused)
 * @where: Pointer for the checksum location (unused)
 *
 * Stub implementation on this platform; asserts and returns false.
 *
 * Return: true/false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}

/**
 * __qdf_nbuf_reset_ctxt() - mem zero control block
 * @nbuf: buffer
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}

/**
 * __qdf_nbuf_network_header() - get network header
 * @buf: buffer
 *
 * Return: network header pointer
 */
static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
{
	return skb_network_header(buf);
}

/**
 * __qdf_nbuf_transport_header() - get transport header
 * @buf: buffer
 *
 * Return: transport header pointer
 */
static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
{
	return skb_transport_header(buf);
}

/**
 * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS) passed as
 *			       part of the network buffer by the network stack
 * @skb: sk buff
 *
 * Return: TCP MSS size
 */
static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Return: none
 */
void __qdf_nbuf_init(__qdf_nbuf_t nbuf);

/**
 * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
 * @nbuf: sk buff
 *
 * Return: void ptr
 */
static inline void *
__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
{
	return (void *)nbuf->cb;
}

/**
 * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
 * @skb: sk buff
 *
 * Return: head size
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	return skb_headlen(skb);
}

/**
 * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
 * @skb: sk buff
 *
 * Return: number of fragments
 */
static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags;
}

/**
 * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP pkt is IPv4 or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}

/**
 * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP pkt is IPv6 or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}

/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 hdr length of the skb
 * @skb: sk buff
 *
 * Return: size of l2+l3+l4 header length
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
 * @skb: sk buff
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return true;
	else
		return false;
}

/**
 * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
 * @skb: sk buff
 *
 * Return: TCP sequence number
 */
static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
{
	return ntohl(tcp_hdr(skb)->seq);
}

/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private
 *			       space
 * @skb: sk buff
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}

/**
 * __qdf_nbuf_mark_wakeup_frame() - mark wakeup frame.
 * @buf: Pointer to nbuf
 *
 * Return: None
 */
static inline void
__qdf_nbuf_mark_wakeup_frame(__qdf_nbuf_t buf)
{
	buf->mark |= QDF_MARK_FIRST_WAKEUP_PACKET;
}

/**
 * __qdf_nbuf_record_rx_queue() - set rx queue in skb
 * @skb: sk buff
 * @queue_id: Queue id
 *
 * Return: void
 */
static inline void
__qdf_nbuf_record_rx_queue(struct sk_buff *skb, uint16_t queue_id)
{
	skb_record_rx_queue(skb, queue_id);
}

/**
 * __qdf_nbuf_get_queue_mapping() - get the queue mapping set by linux kernel
 * @skb: sk buff
 *
 * Return: Queue mapping
 */
static inline uint16_t
__qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

/**
 * __qdf_nbuf_set_timestamp() - set the timestamp for frame
 * @skb: sk buff
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_timestamp(struct sk_buff *skb)
{
	__net_timestamp(skb);
}

/**
 * __qdf_nbuf_get_timestamp() - get the timestamp for frame
 * @skb: sk buff
 *
 * Return: timestamp stored in skb in ms
 */
static inline uint64_t
__qdf_nbuf_get_timestamp(struct sk_buff *skb)
{
	return ktime_to_ms(skb_get_ktime(skb));
}

/**
 * __qdf_nbuf_get_timedelta_ms() - get time difference in ms
 * @skb: sk buff
 *
 * Return: time difference in ms
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_ms(struct sk_buff *skb)
{
	return ktime_to_ms(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_get_timedelta_us() - get time difference in microseconds
 * @skb: sk buff
 *
 * Return: time difference in microseconds
 */
static inline uint64_t
__qdf_nbuf_get_timedelta_us(struct sk_buff *skb)
{
	return ktime_to_us(net_timedelta(skb->tstamp));
}

/**
 * __qdf_nbuf_orphan() - orphan a nbuf
 * @skb: sk buff
 *
 * If a buffer currently has an owner then we call the
 * owner's destructor function
 *
 * Return: void
 */
static inline void __qdf_nbuf_orphan(struct sk_buff *skb)
{
	skb_orphan(skb);
}

/**
 * __qdf_nbuf_map_nbytes_single() - map nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: QDF_STATUS
 */
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
}
#endif

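/*
 * Usage sketch (illustrative only; osdev, nbuf and the error label are
 * hypothetical caller-side names): map the linear data for device DMA, and
 * always pair the map with an unmap of the same length and direction.
 *
 *	if (__qdf_nbuf_map_nbytes_single(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *					 nbuf->len) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *	// ... hand QDF_NBUF_CB_PADDR(nbuf) to the hardware ...
 *	__qdf_nbuf_unmap_nbytes_single(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				       nbuf->len);
 */
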
/**
 * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
 * @osdev: os device
 * @buf: buffer
 * @dir: direction
 * @nbytes: number of bytes
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);

	if (qdf_likely(paddr))
		dma_unmap_single(osdev->dev, paddr, nbytes,
				 __qdf_dma_dir_to_os(dir));
}
#endif

static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{
	return skb_dequeue(skb_queue_head);
}

static inline
uint32_t __qdf_nbuf_queue_head_qlen(struct sk_buff_head *skb_queue_head)
{
	return skb_queue_head->qlen;
}

static inline
void __qdf_nbuf_queue_head_enqueue_tail(struct sk_buff_head *skb_queue_head,
					struct sk_buff *skb)
{
	skb_queue_tail(skb_queue_head, skb);
}

static inline
void __qdf_nbuf_queue_head_init(struct sk_buff_head *skb_queue_head)
{
	skb_queue_head_init(skb_queue_head);
}

static inline
void __qdf_nbuf_queue_head_purge(struct sk_buff_head *skb_queue_head)
{
	skb_queue_purge(skb_queue_head);
}

/**
 * __qdf_nbuf_queue_head_lock() - Acquire the skb list lock
 * @skb_queue_head: skb list for which the lock is to be acquired
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_lock(struct sk_buff_head *skb_queue_head)
{
	spin_lock_bh(&skb_queue_head->lock);
}

/**
 * __qdf_nbuf_queue_head_unlock() - Release the skb list lock
 * @skb_queue_head: skb list for which the lock is to be released
 *
 * Return: void
 */
static inline
void __qdf_nbuf_queue_head_unlock(struct sk_buff_head *skb_queue_head)
{
	spin_unlock_bh(&skb_queue_head->lock);
}

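/*
 * Usage sketch (illustrative only; rx_list, nbuf and process() are
 * hypothetical caller-side names): these wrappers are backed by
 * sk_buff_head, whose enqueue/dequeue helpers take the list's own spinlock
 * internally, so single operations need no extra locking. The explicit
 * lock/unlock helpers above are for callers that must make a multi-step
 * sequence atomic.
 *
 *	struct sk_buff_head rx_list;
 *
 *	__qdf_nbuf_queue_head_init(&rx_list);
 *	__qdf_nbuf_queue_head_enqueue_tail(&rx_list, nbuf);
 *	while ((nbuf = __qdf_nbuf_queue_head_dequeue(&rx_list)))
 *		process(nbuf);
 */
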
#ifdef CONFIG_NBUF_AP_PLATFORM
#include <i_qdf_nbuf_w.h>
#else
#include <i_qdf_nbuf_m.h>
#endif

#endif /* _I_QDF_NET_BUF_H */