  1. /*
  2. * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. #include <ol_cfg.h>
  27. #include <ol_if_athvar.h>
  28. unsigned int vow_config = 0;
  29. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  30. /**
  31. * ol_tx_set_flow_control_parameters() - set flow control parameters
  32. * @cfg_ctx: cfg context
  33. * @cfg_param: cfg parameters
  34. *
  35. * Return: none
  36. */
  37. static
  38. void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
  39. struct txrx_pdev_cfg_param_t cfg_param)
  40. {
  41. cfg_ctx->tx_flow_start_queue_offset =
  42. cfg_param.tx_flow_start_queue_offset;
  43. cfg_ctx->tx_flow_stop_queue_th =
  44. cfg_param.tx_flow_stop_queue_th;
  45. }
  46. #else
  47. static
  48. void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
  49. struct txrx_pdev_cfg_param_t cfg_param)
  50. {
  51. return;
  52. }
  53. #endif
  54. #if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
  55. static inline
  56. uint8_t ol_defrag_timeout_check(void)
  57. {
  58. return 1;
  59. }
  60. #else
  61. static inline
  62. uint8_t ol_defrag_timeout_check(void)
  63. {
  64. return 0;
  65. }
  66. #endif
/* FIX THIS -
 * For now, all these configuration parameters are hardcoded.
 * Many of these should actually be determined dynamically instead.
 */
/**
 * ol_pdev_cfg_attach() - allocate and populate the txrx pdev cfg context
 * @osdev: OS device handle (currently unused by this function)
 * @cfg_param: caller-supplied configuration values to merge in
 *
 * Allocates a txrx_pdev_cfg_t, fills it with a mix of hardcoded defaults
 * and values taken from @cfg_param (reorder offload, IPA uC resources,
 * rx thread, checksum offload, CE classify, tx flow control), and returns
 * it as an opaque handle.
 *
 * Return: opaque ol_pdev_handle on success, NULL if allocation fails
 */
ol_pdev_handle ol_pdev_cfg_attach(cdf_device_t osdev,
				  struct txrx_pdev_cfg_param_t cfg_param)
{
	struct txrx_pdev_cfg_t *cfg_ctx;
	cfg_ctx = cdf_mem_malloc(sizeof(*cfg_ctx));
	if (!cfg_ctx) {
		printk(KERN_ERR "cfg ctx allocation failed\n");
		return NULL;
	}
	/*
	 * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
	 * Include payload, up to the end of UDP header for IPv4 case
	 */
	cfg_ctx->tx_download_size = 16;
	/*
	 * NOTE(review): original comment said PN check was "temporarily
	 * disabled" for Riva/Pronto, but it is set to 1 (enabled) here —
	 * confirm the intended state.
	 */
	cfg_ctx->rx_pn_check = 1;
	cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
	/* must match the peer table size allocated in FW (see TBDXXX below) */
	cfg_ctx->max_peer_id = 511;
	cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
	cfg_ctx->pn_rx_fwd_check = 1;
	cfg_ctx->frame_type = wlan_frm_fmt_802_3;
	cfg_ctx->max_thruput_mbps = 800;
	cfg_ctx->max_nbuf_frags = 1;
	/* vow_config packs max-sta and per-sta descriptor counts; see
	 * ol_cfg_target_tx_credit for the decoding */
	cfg_ctx->vow_config = vow_config;
	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
	cfg_ctx->throttle_period_ms = 40;
	cfg_ctx->rx_fwd_disabled = 0;
	cfg_ctx->is_packet_log_enabled = 0;
	/* values below are taken directly from the caller's cfg_param */
	cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
		cfg_param.is_uc_offload_enabled;
	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
		cfg_param.uc_rx_indication_ring_count;
	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
	cfg_ctx->enable_rxthread = cfg_param.enable_rxthread;
	cfg_ctx->ip_tcp_udp_checksum_offload =
		cfg_param.ip_tcp_udp_checksum_offload;
	cfg_ctx->ce_classify_enabled = cfg_param.ce_classify_enabled;
	/* no-op unless QCA_LL_TX_FLOW_CONTROL_V2 is compiled in */
	ol_tx_set_flow_control_parameters(cfg_ctx, cfg_param);
	return (ol_pdev_handle) cfg_ctx;
}
  114. int ol_cfg_is_high_latency(ol_pdev_handle pdev)
  115. {
  116. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  117. return cfg->is_high_latency;
  118. }
  119. int ol_cfg_max_peer_id(ol_pdev_handle pdev)
  120. {
  121. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  122. /*
  123. * TBDXXX - this value must match the peer table
  124. * size allocated in FW
  125. */
  126. return cfg->max_peer_id;
  127. }
  128. int ol_cfg_max_vdevs(ol_pdev_handle pdev)
  129. {
  130. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  131. return cfg->max_vdev;
  132. }
  133. int ol_cfg_rx_pn_check(ol_pdev_handle pdev)
  134. {
  135. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  136. return cfg->rx_pn_check;
  137. }
  138. int ol_cfg_rx_fwd_check(ol_pdev_handle pdev)
  139. {
  140. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  141. return cfg->pn_rx_fwd_check;
  142. }
  143. void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
  144. {
  145. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  146. cfg->rx_fwd_disabled = disable_rx_fwd;
  147. }
  148. void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val)
  149. {
  150. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  151. cfg->is_packet_log_enabled = val;
  152. }
  153. uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev)
  154. {
  155. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  156. return cfg->is_packet_log_enabled;
  157. }
  158. int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev)
  159. {
  160. #if defined(ATHR_WIN_NWF)
  161. /* for Windows, let the OS handle the forwarding */
  162. return 1;
  163. #else
  164. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  165. return cfg->rx_fwd_disabled;
  166. #endif
  167. }
  168. int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev)
  169. {
  170. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  171. return cfg->rx_fwd_inter_bss;
  172. }
  173. enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev)
  174. {
  175. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  176. return cfg->frame_type;
  177. }
  178. int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev)
  179. {
  180. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  181. return cfg->max_thruput_mbps;
  182. }
  183. int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev)
  184. {
  185. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  186. return cfg->max_nbuf_frags;
  187. }
  188. int ol_cfg_tx_free_at_download(ol_pdev_handle pdev)
  189. {
  190. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  191. return cfg->tx_free_at_download;
  192. }
  193. uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
  194. {
  195. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  196. uint16_t rc;
  197. uint16_t vow_max_sta = (cfg->vow_config & 0xffff0000) >> 16;
  198. uint16_t vow_max_desc_persta = cfg->vow_config & 0x0000ffff;
  199. rc = (cfg->target_tx_credit + (vow_max_sta * vow_max_desc_persta));
  200. return rc;
  201. }
  202. int ol_cfg_tx_download_size(ol_pdev_handle pdev)
  203. {
  204. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  205. return cfg->tx_download_size;
  206. }
  207. int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev)
  208. {
  209. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  210. return cfg->defrag_timeout_check;
  211. }
  212. int ol_cfg_throttle_period_ms(ol_pdev_handle pdev)
  213. {
  214. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  215. return cfg->throttle_period_ms;
  216. }
  217. int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev)
  218. {
  219. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  220. return cfg->is_full_reorder_offload;
  221. }
  222. /**
  223. * ol_cfg_is_rx_thread_enabled() - return rx_thread is enable/disable
  224. * @pdev : handle to the physical device
  225. *
  226. * Return: 1 - enable, 0 - disable
  227. */
  228. int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev)
  229. {
  230. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  231. return cfg->enable_rxthread;
  232. }
  233. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  234. /**
  235. * ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
  236. * @pdev : handle to the physical device
  237. *
  238. * Return: stop queue threshold
  239. */
  240. int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev)
  241. {
  242. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  243. return cfg->tx_flow_stop_queue_th;
  244. }
  245. /**
  246. * ol_cfg_get_tx_flow_start_queue_offset() - return start queue offset
  247. * @pdev : handle to the physical device
  248. *
  249. * Return: start queue offset
  250. */
  251. int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev)
  252. {
  253. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  254. return cfg->tx_flow_start_queue_offset;
  255. }
  256. #endif
  257. #ifdef IPA_OFFLOAD
  258. unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev)
  259. {
  260. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  261. return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
  262. }
  263. unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev)
  264. {
  265. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  266. return cfg->ipa_uc_rsc.tx_buf_size;
  267. }
  268. unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev)
  269. {
  270. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  271. return cfg->ipa_uc_rsc.tx_max_buf_cnt;
  272. }
  273. unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev)
  274. {
  275. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  276. return cfg->ipa_uc_rsc.rx_ind_ring_size;
  277. }
  278. unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
  279. {
  280. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  281. return cfg->ipa_uc_rsc.tx_partition_base;
  282. }
  283. #endif /* IPA_OFFLOAD */
  284. /**
  285. * ol_cfg_is_ce_classify_enabled() - Return if CE classification is enabled
  286. * or disabled
  287. * @pdev : handle to the physical device
  288. *
  289. * Return: 1 - enabled, 0 - disabled
  290. */
  291. bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev)
  292. {
  293. struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
  294. return cfg->ce_classify_enabled;
  295. }