ncsi-manage.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright Gavin Shan, IBM Corporation 2016.
  4. */
  5. #include <linux/module.h>
  6. #include <linux/kernel.h>
  7. #include <linux/init.h>
  8. #include <linux/netdevice.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/of.h>
  11. #include <linux/platform_device.h>
  12. #include <net/ncsi.h>
  13. #include <net/net_namespace.h>
  14. #include <net/sock.h>
  15. #include <net/addrconf.h>
  16. #include <net/ipv6.h>
  17. #include <net/genetlink.h>
  18. #include "internal.h"
  19. #include "ncsi-pkt.h"
  20. #include "ncsi-netlink.h"
  21. LIST_HEAD(ncsi_dev_list);
  22. DEFINE_SPINLOCK(ncsi_dev_lock);
  23. bool ncsi_channel_has_link(struct ncsi_channel *channel)
  24. {
  25. return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
  26. }
  27. bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
  28. struct ncsi_channel *channel)
  29. {
  30. struct ncsi_package *np;
  31. struct ncsi_channel *nc;
  32. NCSI_FOR_EACH_PACKAGE(ndp, np)
  33. NCSI_FOR_EACH_CHANNEL(np, nc) {
  34. if (nc == channel)
  35. continue;
  36. if (nc->state == NCSI_CHANNEL_ACTIVE &&
  37. ncsi_channel_has_link(nc))
  38. return false;
  39. }
  40. return true;
  41. }
  42. static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
  43. {
  44. struct ncsi_dev *nd = &ndp->ndev;
  45. struct ncsi_package *np;
  46. struct ncsi_channel *nc;
  47. unsigned long flags;
  48. nd->state = ncsi_dev_state_functional;
  49. if (force_down) {
  50. nd->link_up = 0;
  51. goto report;
  52. }
  53. nd->link_up = 0;
  54. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  55. NCSI_FOR_EACH_CHANNEL(np, nc) {
  56. spin_lock_irqsave(&nc->lock, flags);
  57. if (!list_empty(&nc->link) ||
  58. nc->state != NCSI_CHANNEL_ACTIVE) {
  59. spin_unlock_irqrestore(&nc->lock, flags);
  60. continue;
  61. }
  62. if (ncsi_channel_has_link(nc)) {
  63. spin_unlock_irqrestore(&nc->lock, flags);
  64. nd->link_up = 1;
  65. goto report;
  66. }
  67. spin_unlock_irqrestore(&nc->lock, flags);
  68. }
  69. }
  70. report:
  71. nd->handler(nd);
  72. }
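/* Per-channel monitor timer, re-armed once a second while enabled. In the
 * START/RETRY states it transmits a Get Link Status (GLS) command and then
 * counts through the WAIT window; the response path is expected to reset the
 * counter. If the counter ever runs past NCSI_CHANNEL_MONITOR_WAIT_MAX, the
 * channel is treated as unresponsive: link is reported down,
 * NCSI_DEV_RESHUFFLE is set and the channel is requeued so it gets suspended
 * and a new active channel can be chosen.
 */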
  73. static void ncsi_channel_monitor(struct timer_list *t)
  74. {
  75. struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
  76. struct ncsi_package *np = nc->package;
  77. struct ncsi_dev_priv *ndp = np->ndp;
  78. struct ncsi_channel_mode *ncm;
  79. struct ncsi_cmd_arg nca;
  80. bool enabled, chained;
  81. unsigned int monitor_state;
  82. unsigned long flags;
  83. int state, ret;
  84. spin_lock_irqsave(&nc->lock, flags);
  85. state = nc->state;
  86. chained = !list_empty(&nc->link);
  87. enabled = nc->monitor.enabled;
  88. monitor_state = nc->monitor.state;
  89. spin_unlock_irqrestore(&nc->lock, flags);
  90. if (!enabled)
  91. return; /* expected race disabling timer */
  92. if (WARN_ON_ONCE(chained))
  93. goto bad_state;
  94. if (state != NCSI_CHANNEL_INACTIVE &&
  95. state != NCSI_CHANNEL_ACTIVE) {
  96. bad_state:
  97. netdev_warn(ndp->ndev.dev,
  98. "Bad NCSI monitor state channel %d 0x%x %s queue\n",
  99. nc->id, state, chained ? "on" : "off");
  100. spin_lock_irqsave(&nc->lock, flags);
  101. nc->monitor.enabled = false;
  102. spin_unlock_irqrestore(&nc->lock, flags);
  103. return;
  104. }
  105. switch (monitor_state) {
  106. case NCSI_CHANNEL_MONITOR_START:
  107. case NCSI_CHANNEL_MONITOR_RETRY:
  108. nca.ndp = ndp;
  109. nca.package = np->id;
  110. nca.channel = nc->id;
  111. nca.type = NCSI_PKT_CMD_GLS;
  112. nca.req_flags = 0;
  113. ret = ncsi_xmit_cmd(&nca);
  114. if (ret)
  115. netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
  116. ret);
  117. break;
  118. case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
  119. break;
  120. default:
  121. netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
  122. nc->id);
  123. ncsi_report_link(ndp, true);
  124. ndp->flags |= NCSI_DEV_RESHUFFLE;
  125. ncm = &nc->modes[NCSI_MODE_LINK];
  126. spin_lock_irqsave(&nc->lock, flags);
  127. nc->monitor.enabled = false;
  128. nc->state = NCSI_CHANNEL_INVISIBLE;
  129. ncm->data[2] &= ~0x1;
  130. spin_unlock_irqrestore(&nc->lock, flags);
  131. spin_lock_irqsave(&ndp->lock, flags);
  132. nc->state = NCSI_CHANNEL_ACTIVE;
  133. list_add_tail_rcu(&nc->link, &ndp->channel_queue);
  134. spin_unlock_irqrestore(&ndp->lock, flags);
  135. ncsi_process_next_channel(ndp);
  136. return;
  137. }
  138. spin_lock_irqsave(&nc->lock, flags);
  139. nc->monitor.state++;
  140. spin_unlock_irqrestore(&nc->lock, flags);
  141. mod_timer(&nc->monitor.timer, jiffies + HZ);
  142. }
  143. void ncsi_start_channel_monitor(struct ncsi_channel *nc)
  144. {
  145. unsigned long flags;
  146. spin_lock_irqsave(&nc->lock, flags);
  147. WARN_ON_ONCE(nc->monitor.enabled);
  148. nc->monitor.enabled = true;
  149. nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
  150. spin_unlock_irqrestore(&nc->lock, flags);
  151. mod_timer(&nc->monitor.timer, jiffies + HZ);
  152. }
  153. void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
  154. {
  155. unsigned long flags;
  156. spin_lock_irqsave(&nc->lock, flags);
  157. if (!nc->monitor.enabled) {
  158. spin_unlock_irqrestore(&nc->lock, flags);
  159. return;
  160. }
  161. nc->monitor.enabled = false;
  162. spin_unlock_irqrestore(&nc->lock, flags);
  163. del_timer_sync(&nc->monitor.timer);
  164. }
  165. struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
  166. unsigned char id)
  167. {
  168. struct ncsi_channel *nc;
  169. NCSI_FOR_EACH_CHANNEL(np, nc) {
  170. if (nc->id == id)
  171. return nc;
  172. }
  173. return NULL;
  174. }
  175. struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
  176. {
  177. struct ncsi_channel *nc, *tmp;
  178. int index;
  179. unsigned long flags;
  180. nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
  181. if (!nc)
  182. return NULL;
  183. nc->id = id;
  184. nc->package = np;
  185. nc->state = NCSI_CHANNEL_INACTIVE;
  186. nc->monitor.enabled = false;
  187. timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
  188. spin_lock_init(&nc->lock);
  189. INIT_LIST_HEAD(&nc->link);
  190. for (index = 0; index < NCSI_CAP_MAX; index++)
  191. nc->caps[index].index = index;
  192. for (index = 0; index < NCSI_MODE_MAX; index++)
  193. nc->modes[index].index = index;
  194. spin_lock_irqsave(&np->lock, flags);
  195. tmp = ncsi_find_channel(np, id);
  196. if (tmp) {
  197. spin_unlock_irqrestore(&np->lock, flags);
  198. kfree(nc);
  199. return tmp;
  200. }
  201. list_add_tail_rcu(&nc->node, &np->channels);
  202. np->channel_num++;
  203. spin_unlock_irqrestore(&np->lock, flags);
  204. return nc;
  205. }
  206. static void ncsi_remove_channel(struct ncsi_channel *nc)
  207. {
  208. struct ncsi_package *np = nc->package;
  209. unsigned long flags;
  210. spin_lock_irqsave(&nc->lock, flags);
  211. /* Release filters */
  212. kfree(nc->mac_filter.addrs);
  213. kfree(nc->vlan_filter.vids);
  214. nc->state = NCSI_CHANNEL_INACTIVE;
  215. spin_unlock_irqrestore(&nc->lock, flags);
  216. ncsi_stop_channel_monitor(nc);
  217. /* Remove and free channel */
  218. spin_lock_irqsave(&np->lock, flags);
  219. list_del_rcu(&nc->node);
  220. np->channel_num--;
  221. spin_unlock_irqrestore(&np->lock, flags);
  222. kfree(nc);
  223. }
  224. struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
  225. unsigned char id)
  226. {
  227. struct ncsi_package *np;
  228. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  229. if (np->id == id)
  230. return np;
  231. }
  232. return NULL;
  233. }
  234. struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
  235. unsigned char id)
  236. {
  237. struct ncsi_package *np, *tmp;
  238. unsigned long flags;
  239. np = kzalloc(sizeof(*np), GFP_ATOMIC);
  240. if (!np)
  241. return NULL;
  242. np->id = id;
  243. np->ndp = ndp;
  244. spin_lock_init(&np->lock);
  245. INIT_LIST_HEAD(&np->channels);
  246. np->channel_whitelist = UINT_MAX;
  247. spin_lock_irqsave(&ndp->lock, flags);
  248. tmp = ncsi_find_package(ndp, id);
  249. if (tmp) {
  250. spin_unlock_irqrestore(&ndp->lock, flags);
  251. kfree(np);
  252. return tmp;
  253. }
  254. list_add_tail_rcu(&np->node, &ndp->packages);
  255. ndp->package_num++;
  256. spin_unlock_irqrestore(&ndp->lock, flags);
  257. return np;
  258. }
  259. void ncsi_remove_package(struct ncsi_package *np)
  260. {
  261. struct ncsi_dev_priv *ndp = np->ndp;
  262. struct ncsi_channel *nc, *tmp;
  263. unsigned long flags;
  264. /* Release all child channels */
  265. list_for_each_entry_safe(nc, tmp, &np->channels, node)
  266. ncsi_remove_channel(nc);
  267. /* Remove and free package */
  268. spin_lock_irqsave(&ndp->lock, flags);
  269. list_del_rcu(&np->node);
  270. ndp->package_num--;
  271. spin_unlock_irqrestore(&ndp->lock, flags);
  272. kfree(np);
  273. }
  274. void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
  275. unsigned char id,
  276. struct ncsi_package **np,
  277. struct ncsi_channel **nc)
  278. {
  279. struct ncsi_package *p;
  280. struct ncsi_channel *c;
  281. p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
  282. c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
  283. if (np)
  284. *np = p;
  285. if (nc)
  286. *nc = c;
  287. }
  288. /* For two consecutive NCSI commands, the packet IDs shouldn't be the
  289. * same; otherwise a stale response could be matched to the wrong
  290. * request. So the available IDs are allocated in round-robin fashion.
  291. */
  292. struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
  293. unsigned int req_flags)
  294. {
  295. struct ncsi_request *nr = NULL;
  296. int i, limit = ARRAY_SIZE(ndp->requests);
  297. unsigned long flags;
  298. /* Check if there is an available request slot below the ceiling */
  299. spin_lock_irqsave(&ndp->lock, flags);
  300. for (i = ndp->request_id; i < limit; i++) {
  301. if (ndp->requests[i].used)
  302. continue;
  303. nr = &ndp->requests[i];
  304. nr->used = true;
  305. nr->flags = req_flags;
  306. ndp->request_id = i + 1;
  307. goto found;
  308. }
  309. /* Fall back to checking from the starting cursor */
  310. for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
  311. if (ndp->requests[i].used)
  312. continue;
  313. nr = &ndp->requests[i];
  314. nr->used = true;
  315. nr->flags = req_flags;
  316. ndp->request_id = i + 1;
  317. goto found;
  318. }
  319. found:
  320. spin_unlock_irqrestore(&ndp->lock, flags);
  321. return nr;
  322. }
  323. void ncsi_free_request(struct ncsi_request *nr)
  324. {
  325. struct ncsi_dev_priv *ndp = nr->ndp;
  326. struct sk_buff *cmd, *rsp;
  327. unsigned long flags;
  328. bool driven;
  329. if (nr->enabled) {
  330. nr->enabled = false;
  331. del_timer_sync(&nr->timer);
  332. }
  333. spin_lock_irqsave(&ndp->lock, flags);
  334. cmd = nr->cmd;
  335. rsp = nr->rsp;
  336. nr->cmd = NULL;
  337. nr->rsp = NULL;
  338. nr->used = false;
  339. driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
  340. spin_unlock_irqrestore(&ndp->lock, flags);
  341. if (driven && cmd && --ndp->pending_req_num == 0)
  342. schedule_work(&ndp->work);
  343. /* Release command and response */
  344. consume_skb(cmd);
  345. consume_skb(rsp);
  346. }
  347. struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
  348. {
  349. struct ncsi_dev_priv *ndp;
  350. NCSI_FOR_EACH_DEV(ndp) {
  351. if (ndp->ndev.dev == dev)
  352. return &ndp->ndev;
  353. }
  354. return NULL;
  355. }
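/* Expiry handler for a request's timer. If no response has been matched to
 * the command, a netlink-driven request gets a timeout notification sent to
 * its originator and the request slot is released; requests that already
 * have a response are left for the response handler to free.
 */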
  356. static void ncsi_request_timeout(struct timer_list *t)
  357. {
  358. struct ncsi_request *nr = from_timer(nr, t, timer);
  359. struct ncsi_dev_priv *ndp = nr->ndp;
  360. struct ncsi_cmd_pkt *cmd;
  361. struct ncsi_package *np;
  362. struct ncsi_channel *nc;
  363. unsigned long flags;
  364. /* If the request already has an associated response,
  365. * let the response handler release it.
  366. */
  367. spin_lock_irqsave(&ndp->lock, flags);
  368. nr->enabled = false;
  369. if (nr->rsp || !nr->cmd) {
  370. spin_unlock_irqrestore(&ndp->lock, flags);
  371. return;
  372. }
  373. spin_unlock_irqrestore(&ndp->lock, flags);
  374. if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
  375. if (nr->cmd) {
  376. /* Find the package */
  377. cmd = (struct ncsi_cmd_pkt *)
  378. skb_network_header(nr->cmd);
  379. ncsi_find_package_and_channel(ndp,
  380. cmd->cmd.common.channel,
  381. &np, &nc);
  382. ncsi_send_netlink_timeout(nr, np, nc);
  383. }
  384. }
  385. /* Release the request */
  386. ncsi_free_request(nr);
  387. }
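/* Suspend state machine for the active channel: select the package (SP),
 * optionally refresh the link state of every channel in the package (GLS,
 * only when a reshuffle is pending), disable the channel's network Tx (DCNT),
 * disable the channel (DC), then deselect the package (DP) unless another
 * channel in it is still active. Each state transmits its command(s) and the
 * worker is rescheduled once the responses have arrived.
 */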
  388. static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
  389. {
  390. struct ncsi_dev *nd = &ndp->ndev;
  391. struct ncsi_package *np;
  392. struct ncsi_channel *nc, *tmp;
  393. struct ncsi_cmd_arg nca;
  394. unsigned long flags;
  395. int ret;
  396. np = ndp->active_package;
  397. nc = ndp->active_channel;
  398. nca.ndp = ndp;
  399. nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
  400. switch (nd->state) {
  401. case ncsi_dev_state_suspend:
  402. nd->state = ncsi_dev_state_suspend_select;
  403. fallthrough;
  404. case ncsi_dev_state_suspend_select:
  405. ndp->pending_req_num = 1;
  406. nca.type = NCSI_PKT_CMD_SP;
  407. nca.package = np->id;
  408. nca.channel = NCSI_RESERVED_CHANNEL;
  409. if (ndp->flags & NCSI_DEV_HWA)
  410. nca.bytes[0] = 0;
  411. else
  412. nca.bytes[0] = 1;
  413. /* Retrieve the latest link states of the channels in the current
  414. * package when the active channel needs to fail over to another
  415. * one. That means we may select another channel as the next
  416. * active one, and the channels' link states are the most important
  417. * factor in that selection, so they need to be accurate.
  418. * Unfortunately, the link states of inactive channels can't be
  419. * updated by LSC AENs in time.
  420. */
  421. if (ndp->flags & NCSI_DEV_RESHUFFLE)
  422. nd->state = ncsi_dev_state_suspend_gls;
  423. else
  424. nd->state = ncsi_dev_state_suspend_dcnt;
  425. ret = ncsi_xmit_cmd(&nca);
  426. if (ret)
  427. goto error;
  428. break;
  429. case ncsi_dev_state_suspend_gls:
  430. ndp->pending_req_num = np->channel_num;
  431. nca.type = NCSI_PKT_CMD_GLS;
  432. nca.package = np->id;
  433. nd->state = ncsi_dev_state_suspend_dcnt;
  434. NCSI_FOR_EACH_CHANNEL(np, nc) {
  435. nca.channel = nc->id;
  436. ret = ncsi_xmit_cmd(&nca);
  437. if (ret)
  438. goto error;
  439. }
  440. break;
  441. case ncsi_dev_state_suspend_dcnt:
  442. ndp->pending_req_num = 1;
  443. nca.type = NCSI_PKT_CMD_DCNT;
  444. nca.package = np->id;
  445. nca.channel = nc->id;
  446. nd->state = ncsi_dev_state_suspend_dc;
  447. ret = ncsi_xmit_cmd(&nca);
  448. if (ret)
  449. goto error;
  450. break;
  451. case ncsi_dev_state_suspend_dc:
  452. ndp->pending_req_num = 1;
  453. nca.type = NCSI_PKT_CMD_DC;
  454. nca.package = np->id;
  455. nca.channel = nc->id;
  456. nca.bytes[0] = 1;
  457. nd->state = ncsi_dev_state_suspend_deselect;
  458. ret = ncsi_xmit_cmd(&nca);
  459. if (ret)
  460. goto error;
  461. NCSI_FOR_EACH_CHANNEL(np, tmp) {
  462. /* If there is another channel active on this package
  463. * do not deselect the package.
  464. */
  465. if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
  466. nd->state = ncsi_dev_state_suspend_done;
  467. break;
  468. }
  469. }
  470. break;
  471. case ncsi_dev_state_suspend_deselect:
  472. ndp->pending_req_num = 1;
  473. nca.type = NCSI_PKT_CMD_DP;
  474. nca.package = np->id;
  475. nca.channel = NCSI_RESERVED_CHANNEL;
  476. nd->state = ncsi_dev_state_suspend_done;
  477. ret = ncsi_xmit_cmd(&nca);
  478. if (ret)
  479. goto error;
  480. break;
  481. case ncsi_dev_state_suspend_done:
  482. spin_lock_irqsave(&nc->lock, flags);
  483. nc->state = NCSI_CHANNEL_INACTIVE;
  484. spin_unlock_irqrestore(&nc->lock, flags);
  485. if (ndp->flags & NCSI_DEV_RESET)
  486. ncsi_reset_dev(nd);
  487. else
  488. ncsi_process_next_channel(ndp);
  489. break;
  490. default:
  491. netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
  492. nd->state);
  493. }
  494. return;
  495. error:
  496. nd->state = ncsi_dev_state_functional;
  497. }
  498. /* Check the VLAN filter bitmap for a set filter, and construct a
  499. * "Set VLAN Filter - Disable" packet if found.
  500. */
  501. static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
  502. struct ncsi_cmd_arg *nca)
  503. {
  504. struct ncsi_channel_vlan_filter *ncf;
  505. unsigned long flags;
  506. void *bitmap;
  507. int index;
  508. u16 vid;
  509. ncf = &nc->vlan_filter;
  510. bitmap = &ncf->bitmap;
  511. spin_lock_irqsave(&nc->lock, flags);
  512. index = find_first_bit(bitmap, ncf->n_vids);
  513. if (index >= ncf->n_vids) {
  514. spin_unlock_irqrestore(&nc->lock, flags);
  515. return -1;
  516. }
  517. vid = ncf->vids[index];
  518. clear_bit(index, bitmap);
  519. ncf->vids[index] = 0;
  520. spin_unlock_irqrestore(&nc->lock, flags);
  521. nca->type = NCSI_PKT_CMD_SVF;
  522. nca->words[1] = vid;
  523. /* HW filter index starts at 1 */
  524. nca->bytes[6] = index + 1;
  525. nca->bytes[7] = 0x00;
  526. return 0;
  527. }
  528. /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
  529. * packet.
  530. */
  531. static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
  532. struct ncsi_cmd_arg *nca)
  533. {
  534. struct ncsi_channel_vlan_filter *ncf;
  535. struct vlan_vid *vlan = NULL;
  536. unsigned long flags;
  537. int i, index;
  538. void *bitmap;
  539. u16 vid;
  540. if (list_empty(&ndp->vlan_vids))
  541. return -1;
  542. ncf = &nc->vlan_filter;
  543. bitmap = &ncf->bitmap;
  544. spin_lock_irqsave(&nc->lock, flags);
  545. rcu_read_lock();
  546. list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
  547. vid = vlan->vid;
  548. for (i = 0; i < ncf->n_vids; i++)
  549. if (ncf->vids[i] == vid) {
  550. vid = 0;
  551. break;
  552. }
  553. if (vid)
  554. break;
  555. }
  556. rcu_read_unlock();
  557. if (!vid) {
  558. /* All requested VLAN IDs are already set in the filter */
  559. spin_unlock_irqrestore(&nc->lock, flags);
  560. return -1;
  561. }
  562. index = find_first_zero_bit(bitmap, ncf->n_vids);
  563. if (index < 0 || index >= ncf->n_vids) {
  564. netdev_err(ndp->ndev.dev,
  565. "Channel %u already has all VLAN filters set\n",
  566. nc->id);
  567. spin_unlock_irqrestore(&nc->lock, flags);
  568. return -1;
  569. }
  570. ncf->vids[index] = vid;
  571. set_bit(index, bitmap);
  572. spin_unlock_irqrestore(&nc->lock, flags);
  573. nca->type = NCSI_PKT_CMD_SVF;
  574. nca->words[1] = vid;
  575. /* HW filter index starts at 1 */
  576. nca->bytes[6] = index + 1;
  577. nca->bytes[7] = 0x01;
  578. return 0;
  579. }
  580. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
  581. static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
  582. {
  583. unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
  584. int ret = 0;
  585. nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
  586. memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
  587. *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
  588. data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
  589. /* PHY Link up attribute */
  590. data[6] = 0x1;
  591. nca->data = data;
  592. ret = ncsi_xmit_cmd(nca);
  593. if (ret)
  594. netdev_err(nca->ndp->ndev.dev,
  595. "NCSI: Failed to transmit cmd 0x%x during configure\n",
  596. nca->type);
  597. return ret;
  598. }
  599. #endif
  600. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
  601. /* NCSI OEM Command APIs */
  602. static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
  603. {
  604. unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
  605. int ret = 0;
  606. nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
  607. memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
  608. *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
  609. data[5] = NCSI_OEM_BCM_CMD_GMA;
  610. nca->data = data;
  611. ret = ncsi_xmit_cmd(nca);
  612. if (ret)
  613. netdev_err(nca->ndp->ndev.dev,
  614. "NCSI: Failed to transmit cmd 0x%x during configure\n",
  615. nca->type);
  616. return ret;
  617. }
  618. static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
  619. {
  620. union {
  621. u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
  622. u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
  623. } u;
  624. int ret = 0;
  625. nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
  626. memset(&u, 0, sizeof(u));
  627. u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
  628. u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
  629. u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
  630. nca->data = u.data_u8;
  631. ret = ncsi_xmit_cmd(nca);
  632. if (ret)
  633. netdev_err(nca->ndp->ndev.dev,
  634. "NCSI: Failed to transmit cmd 0x%x during configure\n",
  635. nca->type);
  636. return ret;
  637. }
  638. static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
  639. {
  640. union {
  641. u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
  642. u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
  643. } u;
  644. int ret = 0;
  645. memset(&u, 0, sizeof(u));
  646. u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
  647. u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
  648. u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
  649. memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
  650. nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
  651. u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
  652. (MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
  653. nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
  654. nca->data = u.data_u8;
  655. ret = ncsi_xmit_cmd(nca);
  656. if (ret)
  657. netdev_err(nca->ndp->ndev.dev,
  658. "NCSI: Failed to transmit cmd 0x%x during probe\n",
  659. nca->type);
  660. return ret;
  661. }
  662. static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
  663. {
  664. unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
  665. int ret = 0;
  666. nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
  667. memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
  668. *(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
  669. data[4] = NCSI_OEM_INTEL_CMD_GMA;
  670. nca->data = data;
  671. ret = ncsi_xmit_cmd(nca);
  672. if (ret)
  673. netdev_err(nca->ndp->ndev.dev,
  674. "NCSI: Failed to transmit cmd 0x%x during configure\n",
  675. nca->type);
  676. return ret;
  677. }
  678. /* OEM Command handlers initialization */
  679. static struct ncsi_oem_gma_handler {
  680. unsigned int mfr_id;
  681. int (*handler)(struct ncsi_cmd_arg *nca);
  682. } ncsi_oem_gma_handlers[] = {
  683. { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
  684. { NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
  685. { NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
  686. };
  687. static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
  688. {
  689. struct ncsi_oem_gma_handler *nch = NULL;
  690. int i;
  691. /* This function should only be called once, return if flag set */
  692. if (nca->ndp->gma_flag == 1)
  693. return -1;
  694. /* Find gma handler for given manufacturer id */
  695. for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
  696. if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
  697. if (ncsi_oem_gma_handlers[i].handler)
  698. nch = &ncsi_oem_gma_handlers[i];
  699. break;
  700. }
  701. }
  702. if (!nch) {
  703. netdev_err(nca->ndp->ndev.dev,
  704. "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
  705. mf_id);
  706. return -1;
  707. }
  708. /* Get MAC address from the NCSI device */
  709. return nch->handler(nca);
  710. }
  711. #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
  712. /* Determine if a given channel from the channel_queue should be used for Tx */
  713. static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
  714. struct ncsi_channel *nc)
  715. {
  716. struct ncsi_channel_mode *ncm;
  717. struct ncsi_channel *channel;
  718. struct ncsi_package *np;
  719. /* Check if any other channel has Tx enabled; a channel may have already
  720. * been configured and removed from the channel queue.
  721. */
  722. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  723. if (!ndp->multi_package && np != nc->package)
  724. continue;
  725. NCSI_FOR_EACH_CHANNEL(np, channel) {
  726. ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
  727. if (ncm->enable)
  728. return false;
  729. }
  730. }
  731. /* This channel is the preferred channel and has link */
  732. list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
  733. np = channel->package;
  734. if (np->preferred_channel &&
  735. ncsi_channel_has_link(np->preferred_channel)) {
  736. return np->preferred_channel == nc;
  737. }
  738. }
  739. /* This channel has link */
  740. if (ncsi_channel_has_link(nc))
  741. return true;
  742. list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
  743. if (ncsi_channel_has_link(channel))
  744. return false;
  745. /* No other channel has link; default to this one */
  746. return true;
  747. }
  748. /* Change the active Tx channel in a multi-channel setup */
  749. int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
  750. struct ncsi_package *package,
  751. struct ncsi_channel *disable,
  752. struct ncsi_channel *enable)
  753. {
  754. struct ncsi_cmd_arg nca;
  755. struct ncsi_channel *nc;
  756. struct ncsi_package *np;
  757. int ret = 0;
  758. if (!package->multi_channel && !ndp->multi_package)
  759. netdev_warn(ndp->ndev.dev,
  760. "NCSI: Trying to update Tx channel in single-channel mode\n");
  761. nca.ndp = ndp;
  762. nca.req_flags = 0;
  763. /* Find current channel with Tx enabled */
  764. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  765. if (disable)
  766. break;
  767. if (!ndp->multi_package && np != package)
  768. continue;
  769. NCSI_FOR_EACH_CHANNEL(np, nc)
  770. if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
  771. disable = nc;
  772. break;
  773. }
  774. }
  775. /* Find a suitable channel for Tx */
  776. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  777. if (enable)
  778. break;
  779. if (!ndp->multi_package && np != package)
  780. continue;
  781. if (!(ndp->package_whitelist & (0x1 << np->id)))
  782. continue;
  783. if (np->preferred_channel &&
  784. ncsi_channel_has_link(np->preferred_channel)) {
  785. enable = np->preferred_channel;
  786. break;
  787. }
  788. NCSI_FOR_EACH_CHANNEL(np, nc) {
  789. if (!(np->channel_whitelist & 0x1 << nc->id))
  790. continue;
  791. if (nc->state != NCSI_CHANNEL_ACTIVE)
  792. continue;
  793. if (ncsi_channel_has_link(nc)) {
  794. enable = nc;
  795. break;
  796. }
  797. }
  798. }
  799. if (disable == enable)
  800. return -1;
  801. if (!enable)
  802. return -1;
  803. if (disable) {
  804. nca.channel = disable->id;
  805. nca.package = disable->package->id;
  806. nca.type = NCSI_PKT_CMD_DCNT;
  807. ret = ncsi_xmit_cmd(&nca);
  808. if (ret)
  809. netdev_err(ndp->ndev.dev,
  810. "Error %d sending DCNT\n",
  811. ret);
  812. }
  813. netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
  814. nca.channel = enable->id;
  815. nca.package = enable->package->id;
  816. nca.type = NCSI_PKT_CMD_ECNT;
  817. ret = ncsi_xmit_cmd(&nca);
  818. if (ret)
  819. netdev_err(ndp->ndev.dev,
  820. "Error %d sending ECNT\n",
  821. ret);
  822. return ret;
  823. }
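/* Configuration state machine that brings a channel up: select the package
 * (SP), clear initial state (CIS), optionally fetch the MAC address through
 * an OEM command, clear and then re-program the VLAN filters (SVF), enable
 * or disable VLAN filtering (EV/DV), set the MAC address (SMA), enable
 * broadcast filtering (EBF), disable global multicast filtering when it is
 * supported (DGMF), enable network Tx (ECNT) if this channel should carry
 * Tx, enable the channel (EC), enable AENs when supported (AE), and finish
 * with a Get Link Status (GLS) before marking the channel active.
 */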
  824. static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
  825. {
  826. struct ncsi_package *np = ndp->active_package;
  827. struct ncsi_channel *nc = ndp->active_channel;
  828. struct ncsi_channel *hot_nc = NULL;
  829. struct ncsi_dev *nd = &ndp->ndev;
  830. struct net_device *dev = nd->dev;
  831. struct ncsi_cmd_arg nca;
  832. unsigned char index;
  833. unsigned long flags;
  834. int ret;
  835. nca.ndp = ndp;
  836. nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
  837. switch (nd->state) {
  838. case ncsi_dev_state_config:
  839. case ncsi_dev_state_config_sp:
  840. ndp->pending_req_num = 1;
  841. /* Select the specific package */
  842. nca.type = NCSI_PKT_CMD_SP;
  843. if (ndp->flags & NCSI_DEV_HWA)
  844. nca.bytes[0] = 0;
  845. else
  846. nca.bytes[0] = 1;
  847. nca.package = np->id;
  848. nca.channel = NCSI_RESERVED_CHANNEL;
  849. ret = ncsi_xmit_cmd(&nca);
  850. if (ret) {
  851. netdev_err(ndp->ndev.dev,
  852. "NCSI: Failed to transmit CMD_SP\n");
  853. goto error;
  854. }
  855. nd->state = ncsi_dev_state_config_cis;
  856. break;
  857. case ncsi_dev_state_config_cis:
  858. ndp->pending_req_num = 1;
  859. /* Clear initial state */
  860. nca.type = NCSI_PKT_CMD_CIS;
  861. nca.package = np->id;
  862. nca.channel = nc->id;
  863. ret = ncsi_xmit_cmd(&nca);
  864. if (ret) {
  865. netdev_err(ndp->ndev.dev,
  866. "NCSI: Failed to transmit CMD_CIS\n");
  867. goto error;
  868. }
  869. nd->state = ncsi_dev_state_config_oem_gma;
  870. break;
  871. case ncsi_dev_state_config_oem_gma:
  872. nd->state = ncsi_dev_state_config_clear_vids;
  873. ret = -1;
  874. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
  875. nca.type = NCSI_PKT_CMD_OEM;
  876. nca.package = np->id;
  877. nca.channel = nc->id;
  878. ndp->pending_req_num = 1;
  879. ret = ncsi_gma_handler(&nca, nc->version.mf_id);
  880. #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
  881. if (ret < 0)
  882. schedule_work(&ndp->work);
  883. break;
  884. case ncsi_dev_state_config_clear_vids:
  885. case ncsi_dev_state_config_svf:
  886. case ncsi_dev_state_config_ev:
  887. case ncsi_dev_state_config_sma:
  888. case ncsi_dev_state_config_ebf:
  889. case ncsi_dev_state_config_dgmf:
  890. case ncsi_dev_state_config_ecnt:
  891. case ncsi_dev_state_config_ec:
  892. case ncsi_dev_state_config_ae:
  893. case ncsi_dev_state_config_gls:
  894. ndp->pending_req_num = 1;
  895. nca.package = np->id;
  896. nca.channel = nc->id;
  897. /* Clear any active filters on the channel before setting */
  898. if (nd->state == ncsi_dev_state_config_clear_vids) {
  899. ret = clear_one_vid(ndp, nc, &nca);
  900. if (ret) {
  901. nd->state = ncsi_dev_state_config_svf;
  902. schedule_work(&ndp->work);
  903. break;
  904. }
  905. /* Repeat */
  906. nd->state = ncsi_dev_state_config_clear_vids;
  907. /* Add known VLAN tags to the filter */
  908. } else if (nd->state == ncsi_dev_state_config_svf) {
  909. ret = set_one_vid(ndp, nc, &nca);
  910. if (ret) {
  911. nd->state = ncsi_dev_state_config_ev;
  912. schedule_work(&ndp->work);
  913. break;
  914. }
  915. /* Repeat */
  916. nd->state = ncsi_dev_state_config_svf;
  917. /* Enable/Disable the VLAN filter */
  918. } else if (nd->state == ncsi_dev_state_config_ev) {
  919. if (list_empty(&ndp->vlan_vids)) {
  920. nca.type = NCSI_PKT_CMD_DV;
  921. } else {
  922. nca.type = NCSI_PKT_CMD_EV;
  923. nca.bytes[3] = NCSI_CAP_VLAN_NO;
  924. }
  925. nd->state = ncsi_dev_state_config_sma;
  926. } else if (nd->state == ncsi_dev_state_config_sma) {
  927. /* Use first entry in unicast filter table. Note that
  928. * the MAC filter table starts from entry 1 instead of
  929. * 0.
  930. */
  931. nca.type = NCSI_PKT_CMD_SMA;
  932. for (index = 0; index < 6; index++)
  933. nca.bytes[index] = dev->dev_addr[index];
  934. nca.bytes[6] = 0x1;
  935. nca.bytes[7] = 0x1;
  936. nd->state = ncsi_dev_state_config_ebf;
  937. } else if (nd->state == ncsi_dev_state_config_ebf) {
  938. nca.type = NCSI_PKT_CMD_EBF;
  939. nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
  940. /* If global multicast filtering is supported, disable it
  941. * so that all multicast packets are forwarded to the
  942. * management controller.
  943. */
  944. if (nc->caps[NCSI_CAP_GENERIC].cap &
  945. NCSI_CAP_GENERIC_MC)
  946. nd->state = ncsi_dev_state_config_dgmf;
  947. else if (ncsi_channel_is_tx(ndp, nc))
  948. nd->state = ncsi_dev_state_config_ecnt;
  949. else
  950. nd->state = ncsi_dev_state_config_ec;
  951. } else if (nd->state == ncsi_dev_state_config_dgmf) {
  952. nca.type = NCSI_PKT_CMD_DGMF;
  953. if (ncsi_channel_is_tx(ndp, nc))
  954. nd->state = ncsi_dev_state_config_ecnt;
  955. else
  956. nd->state = ncsi_dev_state_config_ec;
  957. } else if (nd->state == ncsi_dev_state_config_ecnt) {
  958. if (np->preferred_channel &&
  959. nc != np->preferred_channel)
  960. netdev_info(ndp->ndev.dev,
  961. "NCSI: Tx failed over to channel %u\n",
  962. nc->id);
  963. nca.type = NCSI_PKT_CMD_ECNT;
  964. nd->state = ncsi_dev_state_config_ec;
  965. } else if (nd->state == ncsi_dev_state_config_ec) {
  966. /* Enable AEN if it's supported */
  967. nca.type = NCSI_PKT_CMD_EC;
  968. nd->state = ncsi_dev_state_config_ae;
  969. if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
  970. nd->state = ncsi_dev_state_config_gls;
  971. } else if (nd->state == ncsi_dev_state_config_ae) {
  972. nca.type = NCSI_PKT_CMD_AE;
  973. nca.bytes[0] = 0;
  974. nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
  975. nd->state = ncsi_dev_state_config_gls;
  976. } else if (nd->state == ncsi_dev_state_config_gls) {
  977. nca.type = NCSI_PKT_CMD_GLS;
  978. nd->state = ncsi_dev_state_config_done;
  979. }
  980. ret = ncsi_xmit_cmd(&nca);
  981. if (ret) {
  982. netdev_err(ndp->ndev.dev,
  983. "NCSI: Failed to transmit CMD %x\n",
  984. nca.type);
  985. goto error;
  986. }
  987. break;
  988. case ncsi_dev_state_config_done:
  989. netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
  990. nc->id);
  991. spin_lock_irqsave(&nc->lock, flags);
  992. nc->state = NCSI_CHANNEL_ACTIVE;
  993. if (ndp->flags & NCSI_DEV_RESET) {
  994. /* A reset event happened during config, start it now */
  995. nc->reconfigure_needed = false;
  996. spin_unlock_irqrestore(&nc->lock, flags);
  997. ncsi_reset_dev(nd);
  998. break;
  999. }
  1000. if (nc->reconfigure_needed) {
  1001. /* This channel's configuration has been updated
  1002. * part-way during the config state - start the
  1003. * channel configuration over
  1004. */
  1005. nc->reconfigure_needed = false;
  1006. nc->state = NCSI_CHANNEL_INACTIVE;
  1007. spin_unlock_irqrestore(&nc->lock, flags);
  1008. spin_lock_irqsave(&ndp->lock, flags);
  1009. list_add_tail_rcu(&nc->link, &ndp->channel_queue);
  1010. spin_unlock_irqrestore(&ndp->lock, flags);
  1011. netdev_dbg(dev, "Dirty NCSI channel state reset\n");
  1012. ncsi_process_next_channel(ndp);
  1013. break;
  1014. }
  1015. if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
  1016. hot_nc = nc;
  1017. } else {
  1018. hot_nc = NULL;
  1019. netdev_dbg(ndp->ndev.dev,
  1020. "NCSI: channel %u link down after config\n",
  1021. nc->id);
  1022. }
  1023. spin_unlock_irqrestore(&nc->lock, flags);
  1024. /* Update the hot channel */
  1025. spin_lock_irqsave(&ndp->lock, flags);
  1026. ndp->hot_channel = hot_nc;
  1027. spin_unlock_irqrestore(&ndp->lock, flags);
  1028. ncsi_start_channel_monitor(nc);
  1029. ncsi_process_next_channel(ndp);
  1030. break;
  1031. default:
  1032. netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
  1033. nd->state);
  1034. }
  1035. return;
  1036. error:
  1037. ncsi_report_link(ndp, true);
  1038. }
  1039. static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
  1040. {
  1041. struct ncsi_channel *nc, *found, *hot_nc;
  1042. struct ncsi_channel_mode *ncm;
  1043. unsigned long flags, cflags;
  1044. struct ncsi_package *np;
  1045. bool with_link;
  1046. spin_lock_irqsave(&ndp->lock, flags);
  1047. hot_nc = ndp->hot_channel;
  1048. spin_unlock_irqrestore(&ndp->lock, flags);
  1049. /* By default the search stops once an inactive channel with link
  1050. * up is found, unless a preferred channel is set.
  1051. * If multi_package or multi_channel is configured, all channels in
  1052. * the whitelist are added to the channel queue.
  1053. */
  1054. found = NULL;
  1055. with_link = false;
  1056. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1057. if (!(ndp->package_whitelist & (0x1 << np->id)))
  1058. continue;
  1059. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1060. if (!(np->channel_whitelist & (0x1 << nc->id)))
  1061. continue;
  1062. spin_lock_irqsave(&nc->lock, cflags);
  1063. if (!list_empty(&nc->link) ||
  1064. nc->state != NCSI_CHANNEL_INACTIVE) {
  1065. spin_unlock_irqrestore(&nc->lock, cflags);
  1066. continue;
  1067. }
  1068. if (!found)
  1069. found = nc;
  1070. if (nc == hot_nc)
  1071. found = nc;
  1072. ncm = &nc->modes[NCSI_MODE_LINK];
  1073. if (ncm->data[2] & 0x1) {
  1074. found = nc;
  1075. with_link = true;
  1076. }
  1077. /* If multi_channel is enabled configure all valid
  1078. * channels whether or not they currently have link
  1079. * so they will have AENs enabled.
  1080. */
  1081. if (with_link || np->multi_channel) {
  1082. spin_lock_irqsave(&ndp->lock, flags);
  1083. list_add_tail_rcu(&nc->link,
  1084. &ndp->channel_queue);
  1085. spin_unlock_irqrestore(&ndp->lock, flags);
  1086. netdev_dbg(ndp->ndev.dev,
  1087. "NCSI: Channel %u added to queue (link %s)\n",
  1088. nc->id,
  1089. ncm->data[2] & 0x1 ? "up" : "down");
  1090. }
  1091. spin_unlock_irqrestore(&nc->lock, cflags);
  1092. if (with_link && !np->multi_channel)
  1093. break;
  1094. }
  1095. if (with_link && !ndp->multi_package)
  1096. break;
  1097. }
  1098. if (list_empty(&ndp->channel_queue) && found) {
  1099. netdev_info(ndp->ndev.dev,
  1100. "NCSI: No channel with link found, configuring channel %u\n",
  1101. found->id);
  1102. spin_lock_irqsave(&ndp->lock, flags);
  1103. list_add_tail_rcu(&found->link, &ndp->channel_queue);
  1104. spin_unlock_irqrestore(&ndp->lock, flags);
  1105. } else if (!found) {
  1106. netdev_warn(ndp->ndev.dev,
  1107. "NCSI: No channel found to configure!\n");
  1108. ncsi_report_link(ndp, true);
  1109. return -ENODEV;
  1110. }
  1111. return ncsi_process_next_channel(ndp);
  1112. }
  1113. static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
  1114. {
  1115. struct ncsi_package *np;
  1116. struct ncsi_channel *nc;
  1117. unsigned int cap;
  1118. bool has_channel = false;
  1119. /* Hardware arbitration is disabled if any one channel
  1120. * doesn't explicitly support it.
  1121. */
  1122. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1123. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1124. has_channel = true;
  1125. cap = nc->caps[NCSI_CAP_GENERIC].cap;
  1126. if (!(cap & NCSI_CAP_GENERIC_HWA) ||
  1127. (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
  1128. NCSI_CAP_GENERIC_HWA_SUPPORT) {
  1129. ndp->flags &= ~NCSI_DEV_HWA;
  1130. return false;
  1131. }
  1132. }
  1133. }
  1134. if (has_channel) {
  1135. ndp->flags |= NCSI_DEV_HWA;
  1136. return true;
  1137. }
  1138. ndp->flags &= ~NCSI_DEV_HWA;
  1139. return false;
  1140. }
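/* Probe state machine: deselect all eight possible packages (DP), then for
 * each package in turn select it (SP), clear initial state on its channels
 * (CIS), issue any OEM-specific commands, gather version (GVI), capabilities
 * (GC) and link status (GLS) from every channel, and deselect the package
 * again before moving on. Once the last package has been probed,
 * NCSI_DEV_PROBED is set, HWA support is evaluated and an active channel is
 * chosen.
 */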
  1141. static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
  1142. {
  1143. struct ncsi_dev *nd = &ndp->ndev;
  1144. struct ncsi_package *np;
  1145. struct ncsi_channel *nc;
  1146. struct ncsi_cmd_arg nca;
  1147. unsigned char index;
  1148. int ret;
  1149. nca.ndp = ndp;
  1150. nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
  1151. switch (nd->state) {
  1152. case ncsi_dev_state_probe:
  1153. nd->state = ncsi_dev_state_probe_deselect;
  1154. fallthrough;
  1155. case ncsi_dev_state_probe_deselect:
  1156. ndp->pending_req_num = 8;
  1157. /* Deselect all possible packages */
  1158. nca.type = NCSI_PKT_CMD_DP;
  1159. nca.channel = NCSI_RESERVED_CHANNEL;
  1160. for (index = 0; index < 8; index++) {
  1161. nca.package = index;
  1162. ret = ncsi_xmit_cmd(&nca);
  1163. if (ret)
  1164. goto error;
  1165. }
  1166. nd->state = ncsi_dev_state_probe_package;
  1167. break;
  1168. case ncsi_dev_state_probe_package:
  1169. ndp->pending_req_num = 1;
  1170. nca.type = NCSI_PKT_CMD_SP;
  1171. nca.bytes[0] = 1;
  1172. nca.package = ndp->package_probe_id;
  1173. nca.channel = NCSI_RESERVED_CHANNEL;
  1174. ret = ncsi_xmit_cmd(&nca);
  1175. if (ret)
  1176. goto error;
  1177. nd->state = ncsi_dev_state_probe_channel;
  1178. break;
  1179. case ncsi_dev_state_probe_channel:
  1180. ndp->active_package = ncsi_find_package(ndp,
  1181. ndp->package_probe_id);
  1182. if (!ndp->active_package) {
  1183. /* No response */
  1184. nd->state = ncsi_dev_state_probe_dp;
  1185. schedule_work(&ndp->work);
  1186. break;
  1187. }
  1188. nd->state = ncsi_dev_state_probe_cis;
  1189. if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
  1190. ndp->mlx_multi_host)
  1191. nd->state = ncsi_dev_state_probe_mlx_gma;
  1192. schedule_work(&ndp->work);
  1193. break;
  1194. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
  1195. case ncsi_dev_state_probe_mlx_gma:
  1196. ndp->pending_req_num = 1;
  1197. nca.type = NCSI_PKT_CMD_OEM;
  1198. nca.package = ndp->active_package->id;
  1199. nca.channel = 0;
  1200. ret = ncsi_oem_gma_handler_mlx(&nca);
  1201. if (ret)
  1202. goto error;
  1203. nd->state = ncsi_dev_state_probe_mlx_smaf;
  1204. break;
  1205. case ncsi_dev_state_probe_mlx_smaf:
  1206. ndp->pending_req_num = 1;
  1207. nca.type = NCSI_PKT_CMD_OEM;
  1208. nca.package = ndp->active_package->id;
  1209. nca.channel = 0;
  1210. ret = ncsi_oem_smaf_mlx(&nca);
  1211. if (ret)
  1212. goto error;
  1213. nd->state = ncsi_dev_state_probe_cis;
  1214. break;
  1215. #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
  1216. case ncsi_dev_state_probe_cis:
  1217. ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
  1218. /* Clear initial state */
  1219. nca.type = NCSI_PKT_CMD_CIS;
  1220. nca.package = ndp->active_package->id;
  1221. for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
  1222. nca.channel = index;
  1223. ret = ncsi_xmit_cmd(&nca);
  1224. if (ret)
  1225. goto error;
  1226. }
  1227. nd->state = ncsi_dev_state_probe_gvi;
  1228. if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
  1229. nd->state = ncsi_dev_state_probe_keep_phy;
  1230. break;
  1231. #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
  1232. case ncsi_dev_state_probe_keep_phy:
  1233. ndp->pending_req_num = 1;
  1234. nca.type = NCSI_PKT_CMD_OEM;
  1235. nca.package = ndp->active_package->id;
  1236. nca.channel = 0;
  1237. ret = ncsi_oem_keep_phy_intel(&nca);
  1238. if (ret)
  1239. goto error;
  1240. nd->state = ncsi_dev_state_probe_gvi;
  1241. break;
  1242. #endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
  1243. case ncsi_dev_state_probe_gvi:
  1244. case ncsi_dev_state_probe_gc:
  1245. case ncsi_dev_state_probe_gls:
  1246. np = ndp->active_package;
  1247. ndp->pending_req_num = np->channel_num;
  1248. /* Retrieve version, capability or link status */
  1249. if (nd->state == ncsi_dev_state_probe_gvi)
  1250. nca.type = NCSI_PKT_CMD_GVI;
  1251. else if (nd->state == ncsi_dev_state_probe_gc)
  1252. nca.type = NCSI_PKT_CMD_GC;
  1253. else
  1254. nca.type = NCSI_PKT_CMD_GLS;
  1255. nca.package = np->id;
  1256. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1257. nca.channel = nc->id;
  1258. ret = ncsi_xmit_cmd(&nca);
  1259. if (ret)
  1260. goto error;
  1261. }
  1262. if (nd->state == ncsi_dev_state_probe_gvi)
  1263. nd->state = ncsi_dev_state_probe_gc;
  1264. else if (nd->state == ncsi_dev_state_probe_gc)
  1265. nd->state = ncsi_dev_state_probe_gls;
  1266. else
  1267. nd->state = ncsi_dev_state_probe_dp;
  1268. break;
  1269. case ncsi_dev_state_probe_dp:
  1270. ndp->pending_req_num = 1;
  1271. /* Deselect the current package */
  1272. nca.type = NCSI_PKT_CMD_DP;
  1273. nca.package = ndp->package_probe_id;
  1274. nca.channel = NCSI_RESERVED_CHANNEL;
  1275. ret = ncsi_xmit_cmd(&nca);
  1276. if (ret)
  1277. goto error;
  1278. /* Probe next package */
  1279. ndp->package_probe_id++;
  1280. if (ndp->package_probe_id >= 8) {
  1281. /* Probe finished */
  1282. ndp->flags |= NCSI_DEV_PROBED;
  1283. break;
  1284. }
  1285. nd->state = ncsi_dev_state_probe_package;
  1286. ndp->active_package = NULL;
  1287. break;
  1288. default:
  1289. netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
  1290. nd->state);
  1291. }
  1292. if (ndp->flags & NCSI_DEV_PROBED) {
  1293. /* Check if all packages have HWA support */
  1294. ncsi_check_hwa(ndp);
  1295. ncsi_choose_active_channel(ndp);
  1296. }
  1297. return;
  1298. error:
  1299. netdev_err(ndp->ndev.dev,
  1300. "NCSI: Failed to transmit cmd 0x%x during probe\n",
  1301. nca.type);
  1302. ncsi_report_link(ndp, true);
  1303. }
  1304. static void ncsi_dev_work(struct work_struct *work)
  1305. {
  1306. struct ncsi_dev_priv *ndp = container_of(work,
  1307. struct ncsi_dev_priv, work);
  1308. struct ncsi_dev *nd = &ndp->ndev;
  1309. switch (nd->state & ncsi_dev_state_major) {
  1310. case ncsi_dev_state_probe:
  1311. ncsi_probe_channel(ndp);
  1312. break;
  1313. case ncsi_dev_state_suspend:
  1314. ncsi_suspend_channel(ndp);
  1315. break;
  1316. case ncsi_dev_state_config:
  1317. ncsi_configure_channel(ndp);
  1318. break;
  1319. default:
  1320. netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
  1321. nd->state);
  1322. }
  1323. }
  1324. int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
  1325. {
  1326. struct ncsi_channel *nc;
  1327. int old_state;
  1328. unsigned long flags;
  1329. spin_lock_irqsave(&ndp->lock, flags);
  1330. nc = list_first_or_null_rcu(&ndp->channel_queue,
  1331. struct ncsi_channel, link);
  1332. if (!nc) {
  1333. spin_unlock_irqrestore(&ndp->lock, flags);
  1334. goto out;
  1335. }
  1336. list_del_init(&nc->link);
  1337. spin_unlock_irqrestore(&ndp->lock, flags);
  1338. spin_lock_irqsave(&nc->lock, flags);
  1339. old_state = nc->state;
  1340. nc->state = NCSI_CHANNEL_INVISIBLE;
  1341. spin_unlock_irqrestore(&nc->lock, flags);
  1342. ndp->active_channel = nc;
  1343. ndp->active_package = nc->package;
  1344. switch (old_state) {
  1345. case NCSI_CHANNEL_INACTIVE:
  1346. ndp->ndev.state = ncsi_dev_state_config;
  1347. netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
  1348. nc->id);
  1349. ncsi_configure_channel(ndp);
  1350. break;
  1351. case NCSI_CHANNEL_ACTIVE:
  1352. ndp->ndev.state = ncsi_dev_state_suspend;
  1353. netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
  1354. nc->id);
  1355. ncsi_suspend_channel(ndp);
  1356. break;
  1357. default:
  1358. netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
  1359. old_state, nc->package->id, nc->id);
  1360. ncsi_report_link(ndp, false);
  1361. return -EINVAL;
  1362. }
  1363. return 0;
  1364. out:
  1365. ndp->active_channel = NULL;
  1366. ndp->active_package = NULL;
  1367. if (ndp->flags & NCSI_DEV_RESHUFFLE) {
  1368. ndp->flags &= ~NCSI_DEV_RESHUFFLE;
  1369. return ncsi_choose_active_channel(ndp);
  1370. }
  1371. ncsi_report_link(ndp, false);
  1372. return -ENODEV;
  1373. }
  1374. static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
  1375. {
  1376. struct ncsi_dev *nd = &ndp->ndev;
  1377. struct ncsi_channel *nc;
  1378. struct ncsi_package *np;
  1379. unsigned long flags;
  1380. unsigned int n = 0;
  1381. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1382. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1383. spin_lock_irqsave(&nc->lock, flags);
  1384. /* Channels may be busy; mark them dirty instead of
  1385. * kicking if:
  1386. * a) not ACTIVE (configured)
  1387. * b) in the channel_queue (to be configured)
  1388. * c) their ndev is in the config state
  1389. */
  1390. if (nc->state != NCSI_CHANNEL_ACTIVE) {
  1391. if ((ndp->ndev.state & 0xff00) ==
  1392. ncsi_dev_state_config ||
  1393. !list_empty(&nc->link)) {
  1394. netdev_dbg(nd->dev,
  1395. "NCSI: channel %p marked dirty\n",
  1396. nc);
  1397. nc->reconfigure_needed = true;
  1398. }
  1399. spin_unlock_irqrestore(&nc->lock, flags);
  1400. continue;
  1401. }
  1402. spin_unlock_irqrestore(&nc->lock, flags);
  1403. ncsi_stop_channel_monitor(nc);
  1404. spin_lock_irqsave(&nc->lock, flags);
  1405. nc->state = NCSI_CHANNEL_INACTIVE;
  1406. spin_unlock_irqrestore(&nc->lock, flags);
  1407. spin_lock_irqsave(&ndp->lock, flags);
  1408. list_add_tail_rcu(&nc->link, &ndp->channel_queue);
  1409. spin_unlock_irqrestore(&ndp->lock, flags);
  1410. netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
  1411. n++;
  1412. }
  1413. }
  1414. return n;
  1415. }
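/* Called when a VLAN ID is registered on the NCSI-managed interface: record
 * the ID on the driver's internal list. ncsi_kick_channels() then pushes the
 * affected channels back through configuration so the new filter gets
 * programmed with Set VLAN Filter commands.
 */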
  1416. int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
  1417. {
  1418. struct ncsi_dev_priv *ndp;
  1419. unsigned int n_vids = 0;
  1420. struct vlan_vid *vlan;
  1421. struct ncsi_dev *nd;
  1422. bool found = false;
  1423. if (vid == 0)
  1424. return 0;
  1425. nd = ncsi_find_dev(dev);
  1426. if (!nd) {
  1427. netdev_warn(dev, "NCSI: No net_device?\n");
  1428. return 0;
  1429. }
  1430. ndp = TO_NCSI_DEV_PRIV(nd);
  1431. /* Add the VLAN id to our internal list */
  1432. list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
  1433. n_vids++;
  1434. if (vlan->vid == vid) {
  1435. netdev_dbg(dev, "NCSI: vid %u already registered\n",
  1436. vid);
  1437. return 0;
  1438. }
  1439. }
  1440. if (n_vids >= NCSI_MAX_VLAN_VIDS) {
  1441. netdev_warn(dev,
  1442. "tried to add vlan id %u but NCSI max already registered (%u)\n",
  1443. vid, NCSI_MAX_VLAN_VIDS);
  1444. return -ENOSPC;
  1445. }
  1446. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  1447. if (!vlan)
  1448. return -ENOMEM;
  1449. vlan->proto = proto;
  1450. vlan->vid = vid;
  1451. list_add_rcu(&vlan->list, &ndp->vlan_vids);
  1452. netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
  1453. found = ncsi_kick_channels(ndp) != 0;
  1454. return found ? ncsi_process_next_channel(ndp) : 0;
  1455. }
  1456. EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
  1457. int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
  1458. {
  1459. struct vlan_vid *vlan, *tmp;
  1460. struct ncsi_dev_priv *ndp;
  1461. struct ncsi_dev *nd;
  1462. bool found = false;
  1463. if (vid == 0)
  1464. return 0;
  1465. nd = ncsi_find_dev(dev);
  1466. if (!nd) {
  1467. netdev_warn(dev, "NCSI: no net_device?\n");
  1468. return 0;
  1469. }
  1470. ndp = TO_NCSI_DEV_PRIV(nd);
  1471. /* Remove the VLAN id from our internal list */
  1472. list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
  1473. if (vlan->vid == vid) {
  1474. netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
  1475. list_del_rcu(&vlan->list);
  1476. found = true;
  1477. kfree(vlan);
  1478. }
  1479. if (!found) {
  1480. netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
  1481. return -EINVAL;
  1482. }
  1483. found = ncsi_kick_channels(ndp) != 0;
  1484. return found ? ncsi_process_next_channel(ndp) : 0;
  1485. }
  1486. EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
  1487. struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
  1488. void (*handler)(struct ncsi_dev *ndev))
  1489. {
  1490. struct ncsi_dev_priv *ndp;
  1491. struct ncsi_dev *nd;
  1492. struct platform_device *pdev;
  1493. struct device_node *np;
  1494. unsigned long flags;
  1495. int i;
  1496. /* Check if the device has been registered or not */
  1497. nd = ncsi_find_dev(dev);
  1498. if (nd)
  1499. return nd;
  1500. /* Create NCSI device */
  1501. ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
  1502. if (!ndp)
  1503. return NULL;
  1504. nd = &ndp->ndev;
  1505. nd->state = ncsi_dev_state_registered;
  1506. nd->dev = dev;
  1507. nd->handler = handler;
  1508. ndp->pending_req_num = 0;
  1509. INIT_LIST_HEAD(&ndp->channel_queue);
  1510. INIT_LIST_HEAD(&ndp->vlan_vids);
  1511. INIT_WORK(&ndp->work, ncsi_dev_work);
  1512. ndp->package_whitelist = UINT_MAX;
  1513. /* Initialize private NCSI device */
  1514. spin_lock_init(&ndp->lock);
  1515. INIT_LIST_HEAD(&ndp->packages);
  1516. ndp->request_id = NCSI_REQ_START_IDX;
  1517. for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
  1518. ndp->requests[i].id = i;
  1519. ndp->requests[i].ndp = ndp;
  1520. timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
  1521. }
  1522. spin_lock_irqsave(&ncsi_dev_lock, flags);
  1523. list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
  1524. spin_unlock_irqrestore(&ncsi_dev_lock, flags);
  1525. /* Register NCSI packet Rx handler */
  1526. ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
  1527. ndp->ptype.func = ncsi_rcv_rsp;
  1528. ndp->ptype.dev = dev;
  1529. dev_add_pack(&ndp->ptype);
  1530. pdev = to_platform_device(dev->dev.parent);
  1531. if (pdev) {
  1532. np = pdev->dev.of_node;
  1533. if (np && (of_get_property(np, "mellanox,multi-host", NULL) ||
  1534. of_get_property(np, "mlx,multi-host", NULL)))
  1535. ndp->mlx_multi_host = true;
  1536. }
  1537. return nd;
  1538. }
  1539. EXPORT_SYMBOL_GPL(ncsi_register_dev);
  1540. int ncsi_start_dev(struct ncsi_dev *nd)
  1541. {
  1542. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1543. if (nd->state != ncsi_dev_state_registered &&
  1544. nd->state != ncsi_dev_state_functional)
  1545. return -ENOTTY;
  1546. if (!(ndp->flags & NCSI_DEV_PROBED)) {
  1547. ndp->package_probe_id = 0;
  1548. nd->state = ncsi_dev_state_probe;
  1549. schedule_work(&ndp->work);
  1550. return 0;
  1551. }
  1552. return ncsi_reset_dev(nd);
  1553. }
  1554. EXPORT_SYMBOL_GPL(ncsi_start_dev);
  1555. void ncsi_stop_dev(struct ncsi_dev *nd)
  1556. {
  1557. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1558. struct ncsi_package *np;
  1559. struct ncsi_channel *nc;
  1560. bool chained;
  1561. int old_state;
  1562. unsigned long flags;
  1563. /* Stop the channel monitor on any active channels. Don't reset the
  1564. * channel state so we know which were active when ncsi_start_dev()
  1565. * is next called.
  1566. */
  1567. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1568. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1569. ncsi_stop_channel_monitor(nc);
  1570. spin_lock_irqsave(&nc->lock, flags);
  1571. chained = !list_empty(&nc->link);
  1572. old_state = nc->state;
  1573. spin_unlock_irqrestore(&nc->lock, flags);
  1574. WARN_ON_ONCE(chained ||
  1575. old_state == NCSI_CHANNEL_INVISIBLE);
  1576. }
  1577. }
  1578. netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
  1579. ncsi_report_link(ndp, true);
  1580. }
  1581. EXPORT_SYMBOL_GPL(ncsi_stop_dev);
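/* Reset the NCSI device: suspend whichever channel is currently active (the
 * NCSI_DEV_RESET flag makes the suspend/config paths re-enter here when they
 * finish), then choose and configure a fresh active channel. Calls made
 * while a reset is already in flight return early.
 */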
  1582. int ncsi_reset_dev(struct ncsi_dev *nd)
  1583. {
  1584. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1585. struct ncsi_channel *nc, *active, *tmp;
  1586. struct ncsi_package *np;
  1587. unsigned long flags;
  1588. spin_lock_irqsave(&ndp->lock, flags);
  1589. if (!(ndp->flags & NCSI_DEV_RESET)) {
  1590. /* Reset hasn't started yet; check the current state */
  1591. switch (nd->state & ncsi_dev_state_major) {
  1592. case ncsi_dev_state_registered:
  1593. case ncsi_dev_state_probe:
  1594. /* Not even probed yet - do nothing */
  1595. spin_unlock_irqrestore(&ndp->lock, flags);
  1596. return 0;
  1597. case ncsi_dev_state_suspend:
  1598. case ncsi_dev_state_config:
  1599. /* Wait for the channel to finish its suspend/config
  1600. * operation; once it finishes it will check for
  1601. * NCSI_DEV_RESET and reset the state.
  1602. */
  1603. ndp->flags |= NCSI_DEV_RESET;
  1604. spin_unlock_irqrestore(&ndp->lock, flags);
  1605. return 0;
  1606. }
  1607. } else {
  1608. switch (nd->state) {
  1609. case ncsi_dev_state_suspend_done:
  1610. case ncsi_dev_state_config_done:
  1611. case ncsi_dev_state_functional:
  1612. /* Ok */
  1613. break;
  1614. default:
  1615. /* A reset operation is already in progress */
  1616. spin_unlock_irqrestore(&ndp->lock, flags);
  1617. return 0;
  1618. }
  1619. }
  1620. if (!list_empty(&ndp->channel_queue)) {
  1621. /* Clear any channel queue we may have interrupted */
  1622. list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
  1623. list_del_init(&nc->link);
  1624. }
  1625. spin_unlock_irqrestore(&ndp->lock, flags);
  1626. active = NULL;
  1627. NCSI_FOR_EACH_PACKAGE(ndp, np) {
  1628. NCSI_FOR_EACH_CHANNEL(np, nc) {
  1629. spin_lock_irqsave(&nc->lock, flags);
  1630. if (nc->state == NCSI_CHANNEL_ACTIVE) {
  1631. active = nc;
  1632. nc->state = NCSI_CHANNEL_INVISIBLE;
  1633. spin_unlock_irqrestore(&nc->lock, flags);
  1634. ncsi_stop_channel_monitor(nc);
  1635. break;
  1636. }
  1637. spin_unlock_irqrestore(&nc->lock, flags);
  1638. }
  1639. if (active)
  1640. break;
  1641. }
  1642. if (!active) {
  1643. /* Done */
  1644. spin_lock_irqsave(&ndp->lock, flags);
  1645. ndp->flags &= ~NCSI_DEV_RESET;
  1646. spin_unlock_irqrestore(&ndp->lock, flags);
  1647. return ncsi_choose_active_channel(ndp);
  1648. }
  1649. spin_lock_irqsave(&ndp->lock, flags);
  1650. ndp->flags |= NCSI_DEV_RESET;
  1651. ndp->active_channel = active;
  1652. ndp->active_package = active->package;
  1653. spin_unlock_irqrestore(&ndp->lock, flags);
  1654. nd->state = ncsi_dev_state_suspend;
  1655. schedule_work(&ndp->work);
  1656. return 0;
  1657. }
  1658. void ncsi_unregister_dev(struct ncsi_dev *nd)
  1659. {
  1660. struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
  1661. struct ncsi_package *np, *tmp;
  1662. unsigned long flags;
  1663. dev_remove_pack(&ndp->ptype);
  1664. list_for_each_entry_safe(np, tmp, &ndp->packages, node)
  1665. ncsi_remove_package(np);
  1666. spin_lock_irqsave(&ncsi_dev_lock, flags);
  1667. list_del_rcu(&ndp->node);
  1668. spin_unlock_irqrestore(&ncsi_dev_lock, flags);
  1669. kfree(ndp);
  1670. }
  1671. EXPORT_SYMBOL_GPL(ncsi_unregister_dev);