hif_exec.c

/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <hif_exec.h>
#include <ce_main.h>
#include <hif_irq_affinity.h>
#include "qdf_module.h"
/* mapping NAPI budget 0 to internal budget 0
 * NAPI budget 1 to internal budget [1, scaler - 1]
 * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
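/*
 * Worked example (illustrative, not from the original source): with a
 * shift of s = 1 the scaler is 2, so a NAPI budget of 64 maps to an
 * internal budget of (64 << 1) - 1 = 127, and an internal work count
 * of 127 maps back to (127 + 1) >> 1 = 64 -- a lossless round trip at
 * the budget boundary.
 */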
static struct hif_exec_context *hif_exec_tasklet_create(void);
/**
 * hif_print_napi_stats() - print NAPI stats
 * @hif_ctx: hif context
 *
 * return: void
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone\n");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_FATAL,
						  "NAPI[%2d]CPU[%d]: "
						  "%7d %7d %7d %7d \n",
						  i, j,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone);
			}
		}
	}
}
qdf_export_symbol(hif_print_napi_stats);
static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}
/**
 * hif_exec_tasklet_fn() - grp tasklet bottom half
 * @data: the owning hif_exec_context, cast to unsigned long
 *
 * return: void
 */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;

	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET);

	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}
/**
 * hif_exec_poll() - NAPI poll handler for a hif exec group
 * @napi: the NAPI instance being polled
 * @budget: the NAPI budget granted by the network stack
 *
 * return: the amount of work done, normalized back to the NAPI budget
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *exec_ctx =
		qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();

	if (budget)
		normalized_budget =
			NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget);

	if (work_done < normalized_budget) {
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/* if the ext_group supports time based yield, claim full
		 * work done anyway
		 */
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += work_done;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	return work_done;
}
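/*
 * Note (added for clarity): under the Linux NAPI contract, returning a
 * value equal to the budget keeps the instance scheduled for another
 * poll, while napi_complete() plus a smaller return value hands flow
 * control back to the interrupt. hif_exec_poll() leans on this by
 * claiming the full normalized budget whenever the handler yields on
 * time rather than on work, so the group is polled again instead of
 * re-enabling its IRQs.
 */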
/**
 * hif_exec_napi_schedule() - schedule the napi exec instance
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);

	ctx->stats[smp_processor_id()].napi_schedules++;
	napi_schedule(&n_ctx->napi);
}
/**
 * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of napi type
 */
static void hif_exec_napi_kill(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
	int irq_ind;

	if (ctx->inited) {
		napi_disable(&n_ctx->napi);
		ctx->inited = 0;
	}

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);

	netif_napi_del(&(n_ctx->napi));
}
struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};
#ifdef FEATURE_NAPI
/**
 * hif_exec_napi_create() - allocate and initialize a napi exec context
 * @scale: a binary shift factor to map NAPI budget to/from the internal
 *	   budget
 */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
	if (ctx == NULL)
		return NULL;

	ctx->exec_ctx.sched_ops = &napi_sched_ops;
	ctx->exec_ctx.inited = true;
	ctx->exec_ctx.scale_bin_shift = scale;
	init_dummy_netdev(&(ctx->netdev));
	netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
		       QCA_NAPI_BUDGET);
	napi_enable(&ctx->napi);

	return &ctx->exec_ctx;
}
#else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	/* pass __func__ to satisfy the %s in the format string */
	HIF_WARN("%s: FEATURE_NAPI not defined, making tasklet", __func__);
	return hif_exec_tasklet_create();
}
#endif
/**
 * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
 * @ctx: a hif_exec_context known to be of tasklet type
 */
static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
	int irq_ind;

	if (ctx->inited) {
		tasklet_disable(&t_ctx->tasklet);
		tasklet_kill(&t_ctx->tasklet);
	}
	ctx->inited = false;

	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
}
struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};
/**
 * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
 */
static struct hif_exec_context *hif_exec_tasklet_create(void)
{
	struct hif_tasklet_exec_context *ctx;

	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
	if (ctx == NULL)
		return NULL;

	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
		     (unsigned long)ctx);

	ctx->exec_ctx.inited = true;

	return &ctx->exec_ctx;
}
/**
 * hif_exec_get_ctx() - retrieve an exec context based on an id
 * @softc: the hif context owning the exec context
 * @id: the id of the exec context
 *
 * mostly added to make it easier to rename or move the context array
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	if (id < hif_state->hif_num_extgroup)
		return hif_state->hif_ext_group[id];

	return NULL;
}
/**
 * hif_configure_ext_group_interrupts() - API to configure external group
 *	interrupts
 * @hif_ctx: HIF context
 *
 * Return: status
 */
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;
		qdf_spinlock_create(&hif_ext_group->irq_lock);
		if (hif_ext_group->configured &&
		    hif_ext_group->irq_requested == false) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		if (status != 0) {
			HIF_ERROR("%s: failed for group %d", __func__, i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_configure_ext_group_interrupts);
/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_group context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * This callback function also helps keep track of the count of running
 * contexts.
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	hif_ext_group->irq_disable(hif_ext_group);
	qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

	hif_ext_group->sched_ops->schedule(hif_ext_group);

	return IRQ_HANDLED;
}
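/*
 * Lifecycle summary (added for clarity): each group interrupt runs
 * disable IRQs -> bump active_grp_tasklet_cnt -> schedule the bottom
 * half. The matching bottom half (hif_exec_tasklet_fn or
 * hif_exec_poll) decrements the counter and re-enables the IRQs only
 * once its handler reports the work complete, so the counter tracks
 * how many group contexts are in flight at any moment.
 */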
/**
 * hif_exec_kill() - grp tasklet kill
 * @hif_ctx: hif context
 *
 * return: void
 */
void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);

	for (i = 0; i < hif_state->hif_num_extgroup; i++)
		hif_state->hif_ext_group[i]->sched_ops->kill(
			hif_state->hif_ext_group[i]);

	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
}
/**
 * hif_register_ext_group() - API to register external group
 *	interrupt handler.
 * @hif_ctx: HIF context
 * @numirq: number of irqs in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: name of the module registering the context
 * @type: napi vs tasklet
 * @scale: binary shift used to map NAPI budget to/from internal budget
 *
 * Return: status
 */
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
		uint32_t numirq, uint32_t irq[], ext_intr_handler handler,
		void *cb_ctx, const char *context_name,
		enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	if (scn->ext_grp_irq_configured) {
		HIF_ERROR("%s Called after ext grp irq configured\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		HIF_ERROR("%s Max groups reached\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		HIF_ERROR("%s invalid numirq\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	hif_ext_group = hif_exec_create(type, scale);
	if (hif_ext_group == NULL)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;

	hif_state->hif_num_extgroup++;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
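/*
 * Usage sketch (hypothetical caller; dp_grp_handler, dp_soc, and the
 * irq numbers are invented for illustration): a datapath module owning
 * two IRQs could register a NAPI-type group with no budget scaling,
 * then arm all registered groups in one pass:
 *
 *	uint32_t dp_irqs[2] = { 40, 41 };
 *
 *	if (hif_register_ext_group(hif_ctx, 2, dp_irqs, dp_grp_handler,
 *				   dp_soc, "dp_intr", HIF_EXEC_NAPI_TYPE,
 *				   0) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *
 *	hif_configure_ext_group_interrupts(hif_ctx);
 *
 * Registration must precede hif_configure_ext_group_interrupts(); both
 * paths reject calls made after the group IRQs are configured.
 */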
/**
 * hif_exec_create() - create an execution context
 * @type: the type of execution context to create
 * @scale: binary shift used to scale the NAPI budget
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
					 uint32_t scale)
{
	HIF_INFO("%s: create exec_type %d budget %d\n",
		 __func__, type, QCA_NAPI_BUDGET * scale);

	switch (type) {
	case HIF_EXEC_NAPI_TYPE:
		return hif_exec_napi_create(scale);
	case HIF_EXEC_TASKLET_TYPE:
		return hif_exec_tasklet_create();
	default:
		return NULL;
	}
}
/**
 * hif_exec_destroy() - free the hif_exec context
 * @ctx: context to free
 *
 * please kill the context before freeing it to avoid a use after free.
 */
void hif_exec_destroy(struct hif_exec_context *ctx)
{
	qdf_spinlock_destroy(&ctx->irq_lock);
	qdf_mem_free(ctx);
}
/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name and frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		HIF_INFO("%s: Deregistering grp id %d name %s\n",
			 __func__,
			 hif_ext_group->grp_id,
			 hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			hif_ext_group->sched_ops->kill(hif_ext_group);
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);