/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
	case SCHED_FW_OP_DEL:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize,
				      p->u.params.burstsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

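/* Send a class (un)binding to the hardware: Tx queues are (un)bound
 * via a FW_PARAMS command on the queue's egress context, while FLOWC
 * (ETHOFLD) entities are (un)bound by resending their FLOWC message
 * with the new class.
 */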
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;

		err = t4_set_params(adap, adap->mbox, pf, vf, 1,
				    &fw_param, &fw_class);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		fe = (struct sched_flowc_entry *)arg;

		fw_class = bind ? fe->param.class : FW_SCHED_CLS_NONE;
		err = cxgb4_ethofld_send_flowc(adap->port[pi->port_id],
					       fe->param.tid, fw_class);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

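/* Walk every in-use class on the port and return the bound entry that
 * matches @val: the egress queue context id for SCHED_QUEUE entries,
 * or the ETHOFLD TID for SCHED_FLOWC entries.
 */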
static void *t4_sched_entry_lookup(struct port_info *pi,
				   enum sched_bind_type type,
				   const u32 val)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	void *found = NULL;

	/* Look for an entry with matching @val */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		if (e->state == SCHED_STATE_UNUSED ||
		    e->bind_type != type)
			continue;

		switch (type) {
		case SCHED_QUEUE: {
			struct sched_queue_entry *qe;

			list_for_each_entry(qe, &e->entry_list, list) {
				if (qe->cntxt_id == val) {
					found = qe;
					break;
				}
			}
			break;
		}
		case SCHED_FLOWC: {
			struct sched_flowc_entry *fe;

			list_for_each_entry(fe, &e->entry_list, list) {
				if (fe->param.tid == val) {
					found = fe;
					break;
				}
			}
			break;
		}
		default:
			return NULL;
		}

		if (found)
			break;
	}

	return found;
}

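/* Map a port-relative Tx queue index in @p back to the scheduling
 * class it is currently bound to, or return NULL if it is unbound.
 */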
struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
					     struct ch_sched_queue *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return NULL;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);

	return qe ? &pi->sched_tbl->tab[qe->param.class] : NULL;
}

static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];

	/* Find the existing entry that the queue is bound to */
	qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
	if (qe) {
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[qe->param.class];
		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}

	return err;
}

static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_queue_entry *qe = NULL;
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq;
	struct sched_class *e;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind queue to specified class */
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err)
		goto out_err;

	list_add_tail(&qe->list, &e->entry_list);
	e->bind_type = SCHED_QUEUE;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(qe);
	return err;
}

static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	/* Find the existing entry that the flowc is bound to */
	fe = t4_sched_entry_lookup(pi, SCHED_FLOWC, p->tid);
	if (fe) {
		err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC,
					      false);
		if (err)
			return err;

		e = &pi->sched_tbl->tab[fe->param.class];
		list_del(&fe->list);
		kvfree(fe);
		if (atomic_dec_and_test(&e->refcnt))
			cxgb4_sched_class_free(adap->port[pi->port_id], e->idx);
	}

	return err;
}

static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_flowc_entry *fe = NULL;
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	int err = 0;

	if (p->tid < 0 || p->tid >= adap->tids.neotids)
		return -ERANGE;

	fe = kvzalloc(sizeof(*fe), GFP_KERNEL);
	if (!fe)
		return -ENOMEM;

	/* Unbind flowc from any existing class */
	err = t4_sched_flowc_unbind(pi, p);
	if (err)
		goto out_err;

	/* Bind flowc to specified class */
	memcpy(&fe->param, p, sizeof(fe->param));

	e = &s->tab[fe->param.class];
	err = t4_sched_bind_unbind_op(pi, (void *)fe, SCHED_FLOWC, true);
	if (err)
		goto out_err;

	list_add_tail(&fe->list, &e->entry_list);
	e->bind_type = SCHED_FLOWC;
	atomic_inc(&e->refcnt);
	return err;

out_err:
	kvfree(fe);
	return err;
}

static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		list_for_each_entry(qe, &e->entry_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	case SCHED_FLOWC: {
		struct sched_flowc_entry *fe;

		list_for_each_entry(fe, &e->entry_list, list)
			t4_sched_flowc_unbind(pi, &fe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		if (bind)
			err = t4_sched_flowc_bind(pi, fe);
		else
			err = t4_sched_flowc_unbind(pi, fe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Binds an entity (queue or flowc) to a scheduling class. If the
 * entity is already bound to another class, it is first unbound from
 * that class and then bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	return t4_sched_class_bind_unbind_op(pi, arg, type, true);
}

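/* Example: a hedged sketch of binding a port's first Tx queue to a
 * previously allocated class; the field values are illustrative only:
 *
 *	struct ch_sched_queue qe;
 *
 *	qe.queue = 0;		// Tx queue index, relative to the port
 *	qe.class = e->idx;	// class from cxgb4_sched_class_alloc()
 *	err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 */
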
/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (queue or flowc)
 *
 * Unbinds an entity (queue or flowc) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	case SCHED_FLOWC: {
		struct ch_sched_flowc *fe = (struct ch_sched_flowc *)arg;

		class_id = fe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						 const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *found = NULL;
	struct sched_class *e, *end;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_class *e = NULL;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	/* See if there's an existing class with same requested sched
	 * params. Classes can only be shared among FLOWC types. For
	 * other types, always request a new class.
	 */
	if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
		e = t4_sched_class_lookup(pi, p);

	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			return NULL;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err)
			return NULL;
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
	}

	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created. @p->u.params.class
 * must be SCHED_CLS_NONE. If an active class with matching parameters
 * already exists (FLOWC mode only), that class is returned; otherwise
 * any available unused class is claimed and configured in firmware.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}

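/* Example: a hedged sketch of allocating a rate-limited class. The
 * parameter values are illustrative and depend on link speed and the
 * caller's policy (maxrate is in Kbps, matching the reset logic in
 * cxgb4_sched_class_free() below):
 *
 *	struct ch_sched_params p = { 0 };
 *	struct sched_class *e;
 *
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
 *	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
 *	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
 *	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
 *	p.u.params.channel = pi->tx_chan;
 *	p.u.params.class = SCHED_CLS_NONE;	// let the driver pick
 *	p.u.params.maxrate = 100000;		// 100 Mbps, in Kbps
 *
 *	e = cxgb4_sched_class_alloc(dev, &p);
 */
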
/**
 * cxgb4_sched_class_free - free a scheduling class
 * @dev: net_device pointer
 * @classid: scheduling class id to free
 *
 * Frees a scheduling class if there are no users.
 */
void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s = pi->sched_tbl;
	struct ch_sched_params p;
	struct sched_class *e;
	u32 speed;
	int ret;

	e = &s->tab[classid];
	if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) {
		/* Port based rate limiting needs explicit reset back
		 * to max rate. But, we'll do explicit reset for all
		 * types, instead of just port based type, to be on
		 * the safer side.
		 */
		memcpy(&p, &e->info, sizeof(p));
		/* Always reset mode to 0. Otherwise, FLOWC mode will
		 * still be enabled even after resetting the traffic
		 * class.
		 */
		p.u.params.mode = 0;
		p.u.params.minrate = 0;
		p.u.params.pktsize = 0;
		ret = t4_get_link_params(pi, NULL, &speed, NULL);
		if (!ret)
			p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */
		else
			p.u.params.maxrate = SCHED_MAX_RATE_KBPS;

		t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL);

		e->state = SCHED_STATE_UNUSED;
		memset(&e->info, 0, sizeof(e->info));
	}
}

static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
{
	struct port_info *pi = netdev2pinfo(dev);

	t4_sched_class_unbind_all(pi, e, e->bind_type);
	cxgb4_sched_class_free(dev, e->idx);
}

struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].entry_list);
		atomic_set(&s->tab[i].refcnt, 0);
	}

	return s;
}

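/* Example: a hedged sketch of the expected setup/teardown pairing,
 * assuming the firmware-reported scheduler class count is available
 * as adap->params.nsched_cls. The per-port assignment mirrors what
 * t4_cleanup_sched() expects to find in pi->sched_tbl:
 *
 *	pi->sched_tbl = t4_init_sched(adap->params.nsched_cls);
 *	...
 *	t4_cleanup_sched(adap);
 */
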
void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(adap->port[j], e);
		}
		kvfree(s);
	}
}