/*
 * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: hif_napi.c
 *
 * HIF NAPI interface implementation
 */

#include <string.h> /* memset */

/* Linux headers */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#ifdef HELIUMPLUS
#include <soc/qcom/irq-helper.h>
#include <pld_snoc.h>
#endif
#include <linux/pm.h>

/* Driver headers */
#include <cds_api.h>
#include <hif_napi.h>
#include <hif_debug.h>
#include <hif_io32.h>
#include <ce_api.h>
#include <ce_internal.h>

enum napi_decision_vector {
	HIF_NAPI_NOEVENT = 0,
	HIF_NAPI_INITED  = 1,
	HIF_NAPI_CONF_UP = 2
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
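
/*
 * Illustrative sketch (comment only, not compiled): how the decision
 * vector gates NAPI. Both bits must be set before instances are
 * enabled; hif_napi_event() below performs exactly this comparison:
 *
 *	uint32_t state = HIF_NAPI_NOEVENT;
 *	state |= HIF_NAPI_INITED;	// hif_napi_create() ran: state == 0x1
 *	state |= HIF_NAPI_CONF_UP;	// .ini/vendor cmd ON   : state == 0x3
 *	if (state == ENABLE_NAPI_MASK)
 *		;			// NAPI instances get napi_enable()d
 */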
#ifdef HELIUMPLUS
static inline int hif_get_irq_for_ce(int ce_id)
{
	return pld_snoc_get_irq(ce_id);
}
#else /* HELIUMPLUS */
static inline int hif_get_irq_for_ce(int ce_id)
{
	return -EINVAL;
}
static int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu,
				int action)
{
	return 0;
}

int hif_napi_cpu_blacklist(bool is_on) { return 0; }
#endif /* HELIUMPLUS */

/**
 * hif_napi_create() - creates the NAPI structures for all applicable CEs
 * @hif_ctx: pointer to hif context
 * @poll   : poll function to be used for each NAPI instance
 * @budget : budget to be registered with each NAPI instance
 * @scale  : scale factor on the weight (to scale budget to 1000)
 *
 * Description:
 *    Creates NAPI instances. This function is called
 *    unconditionally during initialization. It creates
 *    napi structures through the proper HTC/HIF calls.
 *    The structures are disabled on creation.
 *    Note that for each NAPI instance a separate dummy netdev is used.
 *
 * Return:
 * < 0: error
 * = 0: <should never happen>
 * > 0: id of the created object (for multi-NAPI, number of objects created)
 */
int hif_napi_create(struct hif_opaque_softc *hif_ctx,
		    int (*poll)(struct napi_struct *, int),
		    int budget,
		    int scale)
{
	int i;
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct CE_state *ce_state;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	int rc = 0;

	NAPI_DEBUG("-->(budget=%d, scale=%d)",
		   budget, scale);
	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
		   hif->napi_data.state);
	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
		   hif->napi_data.ce_map);

	napid = &(hif->napi_data);
	if (0 == (napid->state & HIF_NAPI_INITED)) {
		memset(napid, 0, sizeof(struct qca_napi_data));
		spin_lock_init(&(napid->lock));

		napid->state |= HIF_NAPI_INITED;

		rc = hif_napi_cpu_init(napid);
		if (rc != 0) {
			HIF_ERROR("NAPI initialization failed. rc = %d", rc);
			goto hnc_err;
		}

		HIF_INFO("%s: NAPI structures initialized, rc=%d",
			 __func__, rc);
	}
	for (i = 0; i < hif->ce_count; i++) {
		ce_state = hif->ce_id_to_state[i];
		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
			   i, ce_state->htt_rx_data,
			   ce_state->htt_tx_data);
		if (!ce_state->htt_rx_data)
			continue;

		/* Now this is a CE where we need NAPI on */
		NAPI_DEBUG("Creating NAPI on pipe %d", i);

		napii = &(napid->napis[i]);
		memset(napii, 0, sizeof(struct qca_napi_info));
		napii->scale = scale;
		napii->id = NAPI_PIPE2ID(i);
		napii->hif_ctx = hif_ctx;
		napii->irq = hif_get_irq_for_ce(i);
		if (napii->irq < 0)
			HIF_WARN("%s: bad IRQ value for CE %d: %d",
				 __func__, i, napii->irq);

		init_dummy_netdev(&(napii->netdev));

		NAPI_DEBUG("adding napi=%p to netdev=%p (poll=%p, bdgt=%d)",
			   &(napii->napi), &(napii->netdev), poll, budget);
		netif_napi_add(&(napii->netdev), &(napii->napi), poll, budget);

		NAPI_DEBUG("after napi_add");
		NAPI_DEBUG("napi=0x%p, netdev=0x%p",
			   &(napii->napi), &(napii->netdev));
		NAPI_DEBUG("napi.dev_list.prev=0x%p, next=0x%p",
			   napii->napi.dev_list.prev,
			   napii->napi.dev_list.next);
		NAPI_DEBUG("dev.napi_list.prev=0x%p, next=0x%p",
			   napii->netdev.napi_list.prev,
			   napii->netdev.napi_list.next);

		/* It is OK to change the state variable below without
		 * protection as there should be no-one around yet
		 */
		napid->ce_map |= (0x01 << i);
		HIF_INFO("%s: NAPI id %d created for pipe %d", __func__,
			 napii->id, i);
	}
	NAPI_DEBUG("NAPI ids created for all applicable pipes");
hnc_err:
	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
	return napid->ce_map;
}
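
/*
 * Illustrative usage sketch (comment only; the real caller and its poll
 * function live in HDD/HTC, and QCA_NAPI_BUDGET is an assumed name for
 * the configured budget):
 *
 *	int ce_map = hif_napi_create(hif_ctx, hdd_napi_poll,
 *				     QCA_NAPI_BUDGET, QCA_NAPI_DEF_SCALE);
 *	if (ce_map <= 0)
 *		;	// error: no NAPI instance was created
 *	// bit i of ce_map set => pipe i got a NAPI instance
 */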

/**
 * hif_napi_destroy() - destroys the NAPI structures for a given instance
 * @hif_ctx: pointer to hif context
 * @id     : the NAPI instance id (from which the CE id is derived)
 * @force  : if set, will destroy even if entry is active (de-activates)
 *
 * Description:
 *    Destroy a given NAPI instance. This function is called
 *    unconditionally during cleanup.
 *    Refuses to destroy an entry if it is still enabled (unless force=1).
 *    Marks the whole napi_data invalid if all instances are destroyed.
 *
 * Return:
 * -EINVAL: specific entry has not been created
 * -EPERM : specific entry is still active
 * = 0    : success
 */
int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
		     uint8_t id,
		     int force)
{
	uint8_t ce = NAPI_ID2PIPE(id);
	int rc = 0;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);

	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
		HIF_ERROR("%s: NAPI not initialized or entry %d not created",
			  __func__, id);
		rc = -EINVAL;
	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
		HIF_ERROR("%s: NAPI instance %d (pipe %d) not created",
			  __func__, id, ce);
		rc = -EINVAL;
	} else {
		struct qca_napi_data *napid;
		struct qca_napi_info *napii;

		napid = &(hif->napi_data);
		napii = &(napid->napis[ce]);

		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
			if (force) {
				napi_disable(&(napii->napi));
				HIF_INFO("%s: NAPI entry %d force disabled",
					 __func__, id);
				NAPI_DEBUG("NAPI %d force disabled", id);
			} else {
				HIF_ERROR("%s: Cannot destroy active NAPI %d",
					  __func__, id);
				rc = -EPERM;
			}
		}
		if (0 == rc) {
			NAPI_DEBUG("before napi_del");
			NAPI_DEBUG("napi.dlist.prv=0x%p, next=0x%p",
				   napii->napi.dev_list.prev,
				   napii->napi.dev_list.next);
			NAPI_DEBUG("dev.napi_l.prv=0x%p, next=0x%p",
				   napii->netdev.napi_list.prev,
				   napii->netdev.napi_list.next);

			netif_napi_del(&(napii->napi));

			napid->ce_map &= ~(0x01 << ce);
			napii->scale = 0;
			HIF_INFO("%s: NAPI %d destroyed\n", __func__, id);

			/* if there are no active instances and
			 * if they are all destroyed,
			 * set the whole structure to uninitialized state
			 */
			if (napid->ce_map == 0) {
				rc = hif_napi_cpu_deinit(napid);
				/* caller is tolerant to receiving !=0 rc */
				memset(napid,
				       0, sizeof(struct qca_napi_data));
				HIF_INFO("%s: no NAPI instances. Zapped.",
					 __func__);
			}
		}
	}

	return rc;
}
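
/*
 * Illustrative teardown sketch (comment only, names from this file):
 * destroy every instance reported in the ce_map returned by
 * hif_napi_create(), forcing de-activation if still enabled:
 *
 *	for (i = 0; i < CE_COUNT_MAX; i++)
 *		if (ce_map & (0x01 << i))
 *			hif_napi_destroy(hif_ctx, NAPI_PIPE2ID(i), 1);
 */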

/**
 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
 * @hif_ctx: pointer to hif context
 *
 * Description:
 *    Returns the address of the whole structure
 *
 * Return:
 *  <addr>: address of the whole HIF NAPI structure
 */
inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	return &(hif->napi_data);
}

/**
 * hif_napi_event() - reacts to events that impact NAPI
 * @hif_ctx: pointer to hif context
 * @event  : event that has been detected
 * @data   : more data regarding the event
 *
 * Description:
 *   This function handles two types of events:
 *   1- Events that change the state of NAPI (enabled/disabled):
 *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
 *      The state is retrievable by "hdd_napi_enabled(-1)"
 *    - NAPI will be on if either the INI file is on and it has not been
 *      disabled by a subsequent vendor CMD, or it has been enabled by a
 *      vendor CMD.
 *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
 *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
 *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
 *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 *    - In LO tput mode, NAPI will yield control of its interrupts to the
 *      system management functions. However, in HI throughput mode, NAPI
 *      will actively manage its interrupts/instances (by trying to disperse
 *      them out to separate performance cores).
 *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 * Return:
 *  < 0: some error
 *  = 0: event handled successfully
 */
int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int rc = 0;
	uint32_t prev_state;
	int i;
	struct napi_struct *napi;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;

	NAPI_DEBUG("%s: -->(event=%d, aux=%p)", __func__, event, data);

	spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE: {
		int on = (data != ((void *)0));

		HIF_INFO("%s: received evnt: CONF %s; v = %d (state=0x%0x)",
			 __func__,
			 (event == NAPI_EVT_INI_FILE) ? ".ini file" : "cmd",
			 on, prev_state);
		if (on)
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_INFO("%s: duplicate NAPI conf ON msg",
					 __func__);
			} else {
				HIF_INFO("%s: setting configuration to ON",
					 __func__);
				napid->state |= HIF_NAPI_CONF_UP;
			}
		else /* off request */
			if (prev_state & HIF_NAPI_CONF_UP) {
				HIF_INFO("%s: setting configuration to OFF",
					 __func__);
				napid->state &= ~HIF_NAPI_CONF_UP;
			} else {
				HIF_INFO("%s: duplicate NAPI conf OFF msg",
					 __func__);
			}
		break;
	}
	/* case NAPI_EVT_INI_FILE/NAPI_EVT_CMD_STATE */

	case NAPI_EVT_CPU_STATE: {
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_napi_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			rc = hif_napi_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_COLLAPSE);
		} else {
			/* from TPUT_LO -> TPUT_HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_napi_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);
		}
		napid->napi_mode = tput_mode;
		break;
	}

	default: {
		HIF_ERROR("%s: unknown event: %d (data=0x%0lx)",
			  __func__, event, (unsigned long) data);
		break;
	} /* default */
	} /* switch */
	spin_unlock_bh(&(napid->lock));

	/* call hif_napi_cpu_blacklist() without holding the spinlock */
	if (tput_mode == QCA_NAPI_TPUT_LO)
		/* yield control of IRQs to kernel */
		hif_napi_cpu_blacklist(false);
	else if (tput_mode == QCA_NAPI_TPUT_HI)
		hif_napi_cpu_blacklist(true);

	if (prev_state != hif->napi_data.state) {
		if (hif->napi_data.state == ENABLE_NAPI_MASK) {
			rc = 1;
			for (i = 0; i < CE_COUNT_MAX; i++)
				if ((hif->napi_data.ce_map & (0x01 << i))) {
					napi = &(hif->napi_data.napis[i].napi);
					NAPI_DEBUG("%s: enabling NAPI %d",
						   __func__, i);
					napi_enable(napi);
				}
		} else {
			rc = 0;
			for (i = 0; i < CE_COUNT_MAX; i++)
				if (hif->napi_data.ce_map & (0x01 << i)) {
					napi = &(hif->napi_data.napis[i].napi);
					NAPI_DEBUG("%s: disabling NAPI %d",
						   __func__, i);
					napi_disable(napi);
				}
		}
	} else {
		HIF_INFO("%s: no change in hif napi state (still %d)",
			 __func__, prev_state);
	}

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}
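
/*
 * Example of the NAPI_EVT_CPU_STATE data encoding handled above: the CPU
 * number rides in the upper bits and the new state in the low byte. This
 * is how hnc_cpu_notify_cb() (below, HELIUMPLUS only) packs its argument:
 *
 *	unsigned long aux = (cpu << 16) | QCA_NAPI_CPU_DOWN;
 *	hif_napi_event(hif_ctx, NAPI_EVT_CPU_STATE, (void *)aux);
 */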

/**
 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
 * @hif_ctx: hif context
 * @ce     : CE instance (or -1, to check if any CEs are enabled)
 *
 * Return: bool
 */
int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	if (-1 == ce)
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
	else
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
		      (hif->napi_data.ce_map & (0x01 << ce)));
	return rc;
}

/**
 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
 *
 * @hif: hif context
 * @id : id of NAPI instance calling this (used to determine the CE)
 *
 * Return: void
 */
inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);

	hif_irq_enable(scn, NAPI_ID2PIPE(id));
}

/**
 * hif_napi_schedule() - schedules napi, updates stats
 * @hif_ctx: hif context
 * @ce_id  : index of napi instance
 *
 * Return: true (napi was scheduled)
 */
int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
{
	int cpu = smp_processor_id();
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
				 NULL, NULL, 0);
	scn->napi_data.napis[ce_id].stats[cpu].napi_schedules++;
	NAPI_DEBUG("scheduling napi %d (ce:%d)",
		   scn->napi_data.napis[ce_id].id, ce_id);
	napi_schedule(&(scn->napi_data.napis[ce_id].napi));

	return true;
}

/**
 * hif_napi_poll() - NAPI poll routine
 * @hif_ctx: hif context
 * @napi   : pointer to NAPI struct as kernel holds it
 * @budget : the budget registered with this NAPI instance
 *
 * This is the body of the poll function.
 * The poll function is called by the kernel. So, there is a wrapper
 * function in HDD, which in turn calls this function.
 * Two main reasons why the whole thing is not implemented in HDD:
 * a) references to things like ce_service that HDD is not aware of
 * b) proximity to the implementation of ce_tasklet, which the body
 *    of this function should be very close to.
 *
 * NOTE TO THE MAINTAINER:
 *    Consider this function and ce_tasklet very tightly coupled pairs.
 *    Any changes to ce_tasklet or this function will likely need to be
 *    reflected in the counterpart.
 *
 * Return:
 *  int: the amount of work done in this poll (<= budget)
 */
int hif_napi_poll(struct hif_opaque_softc *hif_ctx, struct napi_struct *napi,
		  int budget)
{
	int rc = 0; /* default: no work done, also takes care of error */
	int normalized, bucket;
	int cpu = smp_processor_id();
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napi_info;
	struct CE_state *ce_state = NULL;

	napi_info = (struct qca_napi_info *)
		container_of(napi, struct qca_napi_info, napi);

	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
		   __func__, napi_info->id, napi_info->irq, budget);

	napi_info->stats[cpu].napi_polls++;

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_ENTER, NULL, NULL, cpu);

	if (unlikely(NULL == hif))
		QDF_ASSERT(hif != NULL); /* emit a warning if hif NULL */
	else {
		rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
		NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
			   __func__, rc);
	}
	napi_info->stats[cpu].napi_workdone += rc;
	normalized = (rc / napi_info->scale);

	if (NULL != hif) {
		ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
		if (ce_state && ce_state->lro_flush_cb)
			ce_state->lro_flush_cb(ce_state->lro_data);
	}

	/* do not return 0, if there was some work done,
	 * even if it is below the scale
	 */
	if (rc)
		normalized++;
	bucket = (normalized / QCA_NAPI_DEF_SCALE);
	napi_info->stats[cpu].napi_budget_uses[bucket]++;

	/* if ce_per_engine_service reports 0, the poll should be terminated */
	if (0 == rc)
		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
			   __func__, __LINE__);

	if (ce_state && (!ce_check_rx_pending(ce_state) || 0 == rc)) {
		napi_info->stats[cpu].napi_completes++;

		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
					 NULL, NULL, 0);
		if (normalized >= budget)
			normalized = budget - 1;

		/* enable interrupts */
		napi_complete(napi);
		if (NULL != hif) {
			hif_napi_enable_irq(hif_ctx, napi_info->id);

			/* support suspend/resume */
			qdf_atomic_dec(&(hif->active_tasklet_cnt));
		}

		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
			   __func__, __LINE__);
	} else {
		/* 4.4 kernel NAPI implementation requires drivers to
		 * return full work when they ask to be re-scheduled,
		 * or napi_complete and re-start with a fresh interrupt
		 */
		normalized = budget;
	}

	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
				 NAPI_POLL_EXIT, NULL, NULL, normalized);

	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
	return normalized;
}
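
/*
 * Worked example of the normalization above (illustrative values,
 * assuming scale = 2 and QCA_NAPI_DEF_SCALE = 8):
 *
 *	rc = 17 msgs -> normalized = 17/2 = 8, then +1 for partial work = 9
 *	bucket       = 9/8 = 1  -> napi_budget_uses[1]++
 *	rc = 0       -> normalized stays 0 and NAPI completes
 *
 * If rx is still pending, normalized is forced to the full budget so the
 * kernel re-schedules the poll.
 */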

#ifdef HELIUMPLUS
/*
 * Local functions
 * - no argument checks, all internal/trusted callers
 */

#ifdef FEATURE_NAPI_DEBUG
static void hnc_dump_cpus(struct qca_napi_data *napid)
{
	int i;
	struct qca_napi_cpu *cpu = napid->napi_cpu;

	NAPI_DEBUG("%s: NAPI CPU TABLE", __func__);
	NAPI_DEBUG("lilclhead=%d, bigclhead=%d",
		   napid->lilcl_head, napid->bigcl_head);
	for (i = 0; i < NR_CPUS; i++) {
		NAPI_DEBUG("CPU[%02d]: state:%d crid=%02d clid=%02d "
			   "crmk:0x%0lx thmk:0x%0lx frq:%d eff:%ld "
			   "napi = 0x%08x lnk:%d",
			   i,
			   cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
			   cpu[i].core_mask.bits[0],
			   cpu[i].thread_mask.bits[0],
			   cpu[i].max_freq, cpu[i].efficiency, cpu[i].napis,
			   cpu[i].cluster_nxt);
	}
	/* return; -- Linus does not like it, I do. */
}
#else
static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ };
#endif /* FEATURE_NAPI_DEBUG */

/**
 * hnc_link_clusters() - partitions the cpu table into clusters
 * @napid: pointer to NAPI data
 *
 * Takes in a CPU topology table and builds two linked lists
 * (big cluster cores, list-head at bigcl_head, and little cluster
 * cores, list-head at lilcl_head) out of it.
 *
 * If there are more than two clusters:
 * - bigcl_head and lilcl_head will be different,
 * - the cluster with the highest cpufreq will be considered the "big"
 *   cluster. If there is more than one cluster with the highest frequency,
 *   the *last* of such clusters will be designated as the "big cluster"
 * - the cluster with the lowest cpufreq will be considered the "li'l"
 *   cluster. If there is more than one cluster with the lowest cpu freq,
 *   the *first* of such clusters will be designated as the "little cluster"
 * - We only support up to 32 clusters
 *
 * Return: 0 : OK
 *         !0: error (at least one of lil/big clusters could not be found)
 */
#define HNC_MIN_CLUSTER 0
#define HNC_MAX_CLUSTER 31
static int hnc_link_clusters(struct qca_napi_data *napid)
{
	int rc = 0;

	int i;
	int it = 0;
	uint32_t cl_done = 0x0;
	int cl, curcl, curclhead;
	int more;
	unsigned int lilfrq = INT_MAX;
	unsigned int bigfrq = 0;
	unsigned int clfrq;
	unsigned long cleff;
	int prev;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	napid->lilcl_head = napid->bigcl_head = -1;

	do {
		more = 0;
		it++; curcl = -1;
		for (i = 0; i < NR_CPUS; i++) {
			cl = cpus[i].cluster_id;
			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
				   i, cl);
			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
				QDF_ASSERT(0);
				/* continue if ASSERTs are disabled */
				continue;
			}
			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
				NAPI_DEBUG("Core mask 0. SKIPPED\n");
				continue;
			}
			if (cl_done & (0x01 << cl)) {
				NAPI_DEBUG("Cluster already processed. "
					   "SKIPPED\n");
				continue;
			} else {
				if (more == 0) {
					more = 1;
					curcl = cl;
					curclhead = i; /* row */
					clfrq = cpus[i].max_freq;
					cleff = cpus[i].efficiency;
					prev = -1;
				}
				if ((curcl >= 0) && (curcl != cl)) {
					NAPI_DEBUG("Entry cl(%d) != curcl(%d). "
						   "SKIPPED\n",
						   cl, curcl);
					continue;
				}
				if (cpus[i].efficiency != cleff)
					NAPI_DEBUG("WARN: ef(%ld)!=clef(%ld)\n",
						   cpus[i].efficiency, cleff);
				if (cpus[i].max_freq != clfrq)
					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
						   cpus[i].max_freq, clfrq);
				if (clfrq >= bigfrq) {
					bigfrq = clfrq;
					napid->bigcl_head = curclhead;
					NAPI_DEBUG("bigcl=%d\n", curclhead);
				}
				if (clfrq < lilfrq) {
					lilfrq = clfrq;
					napid->lilcl_head = curclhead;
					NAPI_DEBUG("lilcl=%d\n", curclhead);
				}
				if (prev != -1)
					cpus[prev].cluster_nxt = i;

				prev = i;
			}
		}
		if (curcl >= 0)
			cl_done |= (0x01 << curcl);

	} while (more);

	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
		rc = -EFAULT;

	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
	return rc;
}
#undef HNC_MIN_CLUSTER
#undef HNC_MAX_CLUSTER
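
/*
 * The per-cluster lists built above are singly linked through
 * cluster_nxt and walked like this (sketch; see hncm_dest_cpu()):
 *
 *	int i = napid->lilcl_head;	// or bigcl_head
 *	while (i >= 0) {
 *		// visit napid->napi_cpu[i]
 *		i = napid->napi_cpu[i].cluster_nxt;
 *	}
 */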

/*
 * hotplug function group
 */

/**
 * hnc_cpu_notify_cb() - handles CPU hotplug events
 *
 * On transitions to online, we only handle the ONLINE event,
 * and ignore the PREP events, because we don't want to act too
 * early.
 * On transitions to offline, we act on PREP events, because
 * we may need to move the irqs/NAPIs to another CPU before
 * it is actually off-lined.
 *
 * Return: NOTIFY_OK (don't block action)
 */
static int hnc_cpu_notify_cb(struct notifier_block *nb,
			     unsigned long action,
			     void *hcpu)
{
	int rc = NOTIFY_OK;
	unsigned long cpu = (unsigned long)hcpu;
	struct hif_opaque_softc *hif;
	struct qca_napi_data *napid = NULL;

	NAPI_DEBUG("-->%s(act=%ld, cpu=%ld)", __func__, action, cpu);

	hif = (struct hif_opaque_softc *)cds_get_context(QDF_MODULE_ID_HIF);
	if (qdf_likely(hif != NULL))
		napid = hif_napi_get_all(hif);
	if (qdf_unlikely(napid == NULL)) {
		NAPI_DEBUG("%s: hif/napid NULL (%p/%p)",
			   __func__, hif, napid);
		goto lab_hnc_notify;
	}

	switch (action) {
	case CPU_ONLINE:
		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
		NAPI_DEBUG("%s: CPU %ld marked %d",
			   __func__, cpu, napid->napi_cpu[cpu].state);
		break;
	case CPU_DEAD: /* already dead; we have marked it before, but ... */
	case CPU_DEAD_FROZEN:
		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
		NAPI_DEBUG("%s: CPU %ld marked %d",
			   __func__, cpu, napid->napi_cpu[cpu].state);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
		NAPI_DEBUG("%s: CPU %ld marked %d; updating affinity",
			   __func__, cpu, napid->napi_cpu[cpu].state);
		/* we need to move any NAPIs on this CPU out.
		 * if we are in LO throughput mode, then this is relevant
		 * only if the CPU is the designated low-throughput CPU.
		 */
		hif_napi_event(hif,
			       NAPI_EVT_CPU_STATE,
			       (void *)
			       ((cpu << 16) | napid->napi_cpu[cpu].state));
		break;
	default:
		NAPI_DEBUG("%s: ignored. action: %ld", __func__, action);
		break;
	} /* switch */
lab_hnc_notify:
	NAPI_DEBUG("<--%s [%d]", __func__, rc);
	return rc;
}

/**
 * hnc_hotplug_hook() - installs a hotplug notifier
 * @install: !0 => install, =0 => uninstall
 *
 * Note that this is different from the cpu notifier used by
 * rx_thread (cds_schedule.c).
 * We may consider combining these notifiers in the future.
 *
 * Return: 0: success
 *        <0: error
 */
static struct notifier_block hnc_cpu_notifier = {
	.notifier_call = hnc_cpu_notify_cb,
};
static int hnc_hotplug_hook(int install)
{
	int rc = 0;

	NAPI_DEBUG("-->%s(%d)", __func__, install);
	if (install)
		rc = register_hotcpu_notifier(&hnc_cpu_notifier);
	else
		unregister_hotcpu_notifier(&hnc_cpu_notifier);
	NAPI_DEBUG("<--%s()[%d]", __func__, rc);
	return rc;
}

/**
 * hnc_tput_hook() - installs a callback in the throughput detector
 * @install: !0 => install; =0 => uninstall
 *
 * Installs a callback to be called when wifi driver throughput (tx+rx)
 * crosses a threshold. Currently, we are using the same criteria as
 * TCP ack suppression (500 packets/100ms by default).
 *
 * Return: 0 : success
 *        <0: failure
 */
static int hnc_tput_hook(int install)
{
	int rc = 0;

	/*
	 * Nothing to do until the bw_calculation accepts registration;
	 * it is now hardcoded in wlan_hdd_main.c::hdd_bus_bw_compute_cbk
	 * -> hdd_napi_throughput_policy(...)
	 */
	return rc;
}

/*
 * Implementation of the hif_napi_cpu API
 */

/**
 * hif_napi_cpu_init() - initialization of the irq affinity block
 * @ctx: pointer to qca_napi_data
 *
 * Called by hif_napi_create, after the first instance is created.
 * - builds the napi_rss_cpus table from cpu topology
 * - links cores of the same clusters together
 * - installs hot-plug notifier
 * - installs throughput trigger notifier (when such a mechanism exists)
 *
 * Return: 0: OK
 *        <0: error code
 */
int hif_napi_cpu_init(void *ctx)
{
	int rc = 0;
	int i;
	struct qca_napi_data *napid = (struct qca_napi_data *)ctx;
	struct qca_napi_cpu *cpus = napid->napi_cpu;

	NAPI_DEBUG("--> ");

	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
		NAPI_DEBUG("NAPI RSS table already initialized.\n");
		rc = -EALREADY;
		goto lab_rss_init;
	}

	/* build CPU topology table */
	for_each_possible_cpu(i) {
		cpus[i].state = ((cpumask_test_cpu(i, cpu_online_mask)
				  ? QCA_NAPI_CPU_UP
				  : QCA_NAPI_CPU_DOWN));
		cpus[i].core_id = topology_core_id(i);
		cpus[i].cluster_id = topology_physical_package_id(i);
		cpumask_copy(&(cpus[i].core_mask),
			     topology_core_cpumask(i));
		cpumask_copy(&(cpus[i].thread_mask),
			     topology_sibling_cpumask(i));
		cpus[i].max_freq = cpufreq_quick_get_max(i);
		cpus[i].efficiency = arch_get_cpu_efficiency(i);
		cpus[i].napis = 0x0;
		cpus[i].cluster_nxt = -1; /* invalid */
	}

	/* link clusters together */
	rc = hnc_link_clusters(napid);
	if (0 != rc)
		goto lab_err_topology;

	/* install hotplug notifier */
	rc = hnc_hotplug_hook(1);
	if (0 != rc)
		goto lab_err_hotplug;

	/* install throughput notifier */
	rc = hnc_tput_hook(1);
	if (0 == rc)
		goto lab_rss_init;

lab_err_hotplug:
	hnc_tput_hook(0);
	hnc_hotplug_hook(0);
lab_err_topology:
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
lab_rss_init:
	NAPI_DEBUG("<-- [rc=%d]", rc);
	return rc;
}

/**
 * hif_napi_cpu_deinit() - clean-up of the irq affinity block
 *
 * Called by hif_napi_destroy, when the last instance is removed.
 * - uninstalls throughput and hotplug notifiers
 * - clears the cpu topology table
 *
 * Return: 0: OK
 */
int hif_napi_cpu_deinit(void *ctx)
{
	int rc = 0;
	struct qca_napi_data *napid = (struct qca_napi_data *)ctx;

	NAPI_DEBUG("-->%s(...)", __func__);

	/* uninstall tput notifier */
	rc = hnc_tput_hook(0);

	/* uninstall hotplug notifier */
	rc = hnc_hotplug_hook(0);

	/* clear the topology table */
	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);

	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
	return rc;
}

/**
 * hncm_migrate_to() - migrates a NAPI to a CPU
 * @napid  : pointer to NAPI block
 * @napi_ce: CE_id of the NAPI instance
 * @didx   : index in the CPU topology table for the CPU to migrate to
 *
 * Migrates NAPI (identified by the CE_id) to the destination core.
 * Updates the napi_map of the destination entry.
 *
 * Return:
 *  =0 : success
 *  <0 : error
 */
int hncm_migrate_to(struct qca_napi_data *napid,
		    int napi_ce,
		    int didx)
{
	int rc = 0;
	cpumask_t cpumask;

	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);

	cpumask.bits[0] = (1 << didx);
	rc = irq_set_affinity_hint(napid->napis[napi_ce].irq, &cpumask);
	napid->napi_cpu[didx].napis |= (1 << napi_ce);

	NAPI_DEBUG("<--%s[%d]", __func__, rc);
	return rc;
}

/**
 * hncm_dest_cpu() - finds a destination CPU for NAPI
 * @napid: pointer to NAPI block
 * @act  : RELOCATE | COLLAPSE | DISPERSE
 *
 * Finds the designated destination for the next IRQ.
 * RELOCATE: translated to either COLLAPSE or DISPERSE based
 *           on napid->napi_mode (throughput state)
 * COLLAPSE: All have the same destination: the first online CPU in lilcl
 * DISPERSE: One of the CPUs in bigcl, which has the smallest number of
 *           NAPIs on it
 *
 * Return: >=0 : index in the cpu topology table
 *         < 0 : error
 */
int hncm_dest_cpu(struct qca_napi_data *napid, int act)
{
	int destidx = -1;
	int head, i;

	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
	if (act == HNC_ACT_RELOCATE) {
		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
			act = HNC_ACT_COLLAPSE;
		else
			act = HNC_ACT_DISPERSE;
		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
			   __func__, act);
	}
	if (act == HNC_ACT_COLLAPSE) {
		head = i = napid->lilcl_head;
retry_collapse:
		while (i >= 0) {
			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
				destidx = i;
				break;
			} else {
				i = napid->napi_cpu[i].cluster_nxt;
			}
		}
		if ((destidx < 0) && (head == napid->lilcl_head)) {
			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
				   __func__);
			head = i = napid->bigcl_head;
			goto retry_collapse;
		}
	} else { /* HNC_ACT_DISPERSE */
		int smallest = 99; /* all 32 bits full */
		int smallidx = -1;

		head = i = napid->bigcl_head;
retry_disperse:
		while (i >= 0) {
			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
				smallest = hweight32(napid->napi_cpu[i].napis);
				smallidx = i;
			}
			i = napid->napi_cpu[i].cluster_nxt;
		}
		destidx = smallidx;
		if ((destidx < 0) && (head == napid->bigcl_head)) {
			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
				   __func__);
			head = i = napid->lilcl_head;
			goto retry_disperse;
		}
	}
	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
	return destidx;
}

/**
 * hif_napi_cpu_migrate() - migrate IRQs away
 * @napid: pointer to NAPI block
 * @cpu  : -1 => all CPUs; <n> => specific CPU
 * @act  : COLLAPSE | DISPERSE
 *
 * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
 * cores. Eligible cores are:
 * act=COLLAPSE -> the first online core of the little cluster
 * act=DISPERSE -> separate cores of the big cluster, so that each core will
 *                 host a minimum number of NAPIs/IRQs
 *                 (napid->cpus[cpu].napis)
 *
 * Note that this function is called with a spinlock acquired already.
 *
 * Return: =0: success
 *         <0: error
 */
int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
{
	int rc = 0;
	struct qca_napi_cpu *cpup;
	int i, dind;
	uint32_t napis;

	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
		   __func__, cpu, action);

	/* the following is really: hif_napi_enabled() with less overhead */
	if (napid->ce_map == 0) {
		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
		goto hncm_return;
	}

	cpup = napid->napi_cpu;

	switch (action) {
	case HNC_ACT_RELOCATE:
	case HNC_ACT_DISPERSE:
	case HNC_ACT_COLLAPSE: {
		/* first find the src napi set */
		if (cpu == HNC_ANY_CPU)
			napis = napid->ce_map;
		else
			napis = cpup[cpu].napis;
		/* then clear the napi bitmap on each CPU */
		for (i = 0; i < NR_CPUS; i++)
			cpup[i].napis = 0;
		/* then for each of the NAPIs to disperse: */
		for (i = 0; i < CE_COUNT_MAX; i++)
			if (napis & (1 << i)) {
				/* find a destination CPU */
				dind = hncm_dest_cpu(napid, action);
				if (dind >= 0) {
					NAPI_DEBUG("Migrating NAPI ce%d to %d",
						   i, dind);
					rc = hncm_migrate_to(napid, i, dind);
				} else {
					NAPI_DEBUG("No dest for NAPI ce%d", i);
					hnc_dump_cpus(napid);
					rc = -1;
				}
			}
		break;
	}
	default: {
		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
		QDF_BUG(0);
		break;
	}
	} /* switch action */

hncm_return:
	hnc_dump_cpus(napid);
	return rc;
}

/**
 * hif_napi_cpu_blacklist() - calls kernel API to enable/disable blacklisting
 * @is_on: !0 => blacklist on; =0 => blacklist off
 *
 * Return: rc from the kernel API
 */
int hif_napi_cpu_blacklist(bool is_on)
{
	int rc = 0;

	NAPI_DEBUG("-->%s(%d)", __func__, is_on);
	if (is_on)
		rc = irq_blacklist_on();
	else
		rc = irq_blacklist_off();
	NAPI_DEBUG("<--%s[%d]", __func__, rc);

	return rc;
}
#endif /* ifdef HELIUMPLUS */