fjes_main.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#include "fjes.h"
#include "fjes_trace.h"

#define MAJ 1
#define MIN 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
		"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
		"Copyright (c) 2015 FUJITSU LIMITED";

MODULE_AUTHOR("Taku Izumi <[email protected]>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"

static const struct acpi_device_id fjes_acpi_ids[] = {
	{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);

static bool is_extended_socket_device(struct acpi_device *device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
	union acpi_object *str;
	acpi_status status;
	int result;

	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return false;

	str = buffer.pointer;
	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
				 str->string.length, UTF16_LITTLE_ENDIAN,
				 str_buf, sizeof(str_buf) - 1);
	str_buf[result] = 0;

	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
		kfree(buffer.pointer);
		return false;
	}
	kfree(buffer.pointer);

	return true;
}

static int acpi_check_extended_socket_status(struct acpi_device *device)
{
	unsigned long long sta;
	acpi_status status;

	status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
	      (sta & ACPI_STA_DEVICE_ENABLED) &&
	      (sta & ACPI_STA_DEVICE_UI) &&
	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
		return -ENODEV;

	return 0;
}

static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
	struct acpi_resource_address32 *addr;
	struct acpi_resource_irq *irq;
	struct resource *res = data;

	switch (acpi_res->type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		addr = &acpi_res->data.address32;
		res[0].start = addr->address.minimum;
		res[0].end = addr->address.minimum +
			addr->address.address_length - 1;
		break;

	case ACPI_RESOURCE_TYPE_IRQ:
		irq = &acpi_res->data.irq;
		if (irq->interrupt_count != 1)
			return AE_ERROR;
		res[1].start = irq->interrupts[0];
		res[1].end = irq->interrupts[0];
		break;

	default:
		break;
	}

	return AE_OK;
}

static struct resource fjes_resource[] = {
	DEFINE_RES_MEM(0, 1),
	DEFINE_RES_IRQ(0)
};

static int fjes_acpi_add(struct acpi_device *device)
{
	struct platform_device *plat_dev;
	acpi_status status;

	if (!is_extended_socket_device(device))
		return -ENODEV;

	if (acpi_check_extended_socket_status(device))
		return -ENODEV;

	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     fjes_get_acpi_resource, fjes_resource);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* create platform_device */
	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
						   ARRAY_SIZE(fjes_resource));
	if (IS_ERR(plat_dev))
		return PTR_ERR(plat_dev);

	device->driver_data = plat_dev;

	return 0;
}

static int fjes_acpi_remove(struct acpi_device *device)
{
	struct platform_device *plat_dev;

	plat_dev = (struct platform_device *)acpi_driver_data(device);
	platform_device_unregister(plat_dev);

	return 0;
}

static struct acpi_driver fjes_acpi_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.owner = THIS_MODULE,
	.ids = fjes_acpi_ids,
	.ops = {
		.add = fjes_acpi_add,
		.remove = fjes_acpi_remove,
	},
};
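
/* fjes_setup_resources - fetch zoning info from the hardware and register
 * this EP's shared TX buffers with every partner endpoint in the same zone
 */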
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int result;
	int epidx;

	mutex_lock(&hw->hw_info.lock);
	result = fjes_hw_request_info(hw);
	switch (result) {
	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			hw->ep_shm_info[epidx].es_status =
			    hw->hw_info.res_buf->info.info[epidx].es_status;
			hw->ep_shm_info[epidx].zone =
			    hw->hw_info.res_buf->info.info[epidx].zone;
		}
		break;
	default:
	case -ENOMSG:
	case -EBUSY:
		adapter->force_reset = true;

		mutex_unlock(&hw->hw_info.lock);
		return result;
	}
	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if ((epidx != hw->my_epid) &&
		    (hw->ep_shm_info[epidx].es_status ==
		     FJES_ZONING_STATUS_ENABLE)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_INFO_UPDATE);
			hw->ep_shm_info[epidx].ep_stats
				.send_intr_zoneupdate += 1;
		}
	}

	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);

	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
		if (epidx == hw->my_epid)
			continue;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
				    netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
			mutex_lock(&hw->hw_info.lock);
			result =
			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
			mutex_unlock(&hw->hw_info.lock);

			switch (result) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				adapter->force_reset = true;
				return result;
			}

			hw->ep_shm_info[epidx].ep_stats
				.com_regist_buf_exec += 1;
		}
	}

	return 0;
}

static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);

	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}

static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_WAITING:
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_DONE;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		clear_bit(src_epid, &hw->txrx_stop_req_bit);
		fallthrough;
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		set_bit(src_epid, &adapter->unshare_watch_bitmask);
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
		break;
	case EP_PARTNER_SHARED:
		set_bit(src_epid, &hw->epstop_req_bit);

		if (!work_pending(&hw->epstop_task))
			queue_work(adapter->control_wq, &hw->epstop_task);
		break;
	}
	trace_fjes_stop_req_irq_post(hw, src_epid);
}

static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
				   int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status status;
	unsigned long flags;

	status = fjes_hw_get_partner_ep_status(hw, src_epid);
	trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
	switch (status) {
	case EP_PARTNER_UNSHARE:
	case EP_PARTNER_COMPLETE:
	default:
		break;
	case EP_PARTNER_WAITING:
		if (src_epid < hw->my_epid) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
					FJES_RX_STOP_REQ_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(src_epid, &hw->txrx_stop_req_bit);
			set_bit(src_epid, &adapter->unshare_watch_bitmask);

			if (!work_pending(&adapter->unshare_watch_task))
				queue_work(adapter->control_wq,
					   &adapter->unshare_watch_task);
		}
		break;
	case EP_PARTNER_SHARED:
		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
		    FJES_RX_STOP_REQ_REQUEST) {
			set_bit(src_epid, &hw->epstop_req_bit);
			if (!work_pending(&hw->epstop_task))
				queue_work(adapter->control_wq,
					   &hw->epstop_task);
		}
		break;
	}
	trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}

static void fjes_update_zone_irq(struct fjes_adapter *adapter,
				 int src_epid)
{
	struct fjes_hw *hw = &adapter->hw;

	if (!work_pending(&hw->update_zone_task))
		queue_work(adapter->control_wq, &hw->update_zone_task);
}
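
/* fjes_intr - shared interrupt handler: decode the interrupt cause register
 * and dispatch RX-data, stop-request, unshare and zone-update events
 */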
static irqreturn_t fjes_intr(int irq, void *data)
{
	struct fjes_adapter *adapter = data;
	struct fjes_hw *hw = &adapter->hw;
	irqreturn_t ret;
	u32 icr;

	icr = fjes_hw_capture_interrupt_status(hw);

	if (icr & REG_IS_MASK_IS_ASSERT) {
		if (icr & REG_ICTL_MASK_RX_DATA) {
			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_rx += 1;
		}

		if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_stop += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_unshare += 1;
		}

		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
			fjes_hw_set_irqmask(hw,
					    REG_ICTL_MASK_TXRX_STOP_DONE, true);

		if (icr & REG_ICTL_MASK_INFO_UPDATE) {
			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
			hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
				.recv_intr_zoneupdate += 1;
		}

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	return ret;
}

static int fjes_request_irq(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int result = -1;

	adapter->interrupt_watch_enable = true;
	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
		queue_delayed_work(adapter->control_wq,
				   &adapter->interrupt_watch_task,
				   FJES_IRQ_WATCH_DELAY);
	}

	if (!adapter->irq_registered) {
		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
				     IRQF_SHARED, netdev->name, adapter);
		if (result)
			adapter->irq_registered = false;
		else
			adapter->irq_registered = true;
	}

	return result;
}

static void fjes_free_irq(struct fjes_adapter *adapter)
{
	struct fjes_hw *hw = &adapter->hw;

	adapter->interrupt_watch_enable = false;
	cancel_delayed_work_sync(&adapter->interrupt_watch_task);

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	if (adapter->irq_registered) {
		free_irq(adapter->hw.hw_res.irq, adapter);
		adapter->irq_registered = false;
	}
}

static void fjes_free_resources(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	struct fjes_hw *hw = &adapter->hw;
	bool reset_flag = false;
	unsigned long flags;
	int result;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		mutex_lock(&hw->hw_info.lock);
		result = fjes_hw_unregister_buff_addr(hw, epidx);
		mutex_unlock(&hw->hw_info.lock);

		hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;

		if (result)
			reset_flag = true;

		buf_pair = &hw->ep_shm_info[epidx];

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		fjes_hw_setup_epbuf(&buf_pair->tx,
				    netdev->dev_addr, netdev->mtu);
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		clear_bit(epidx, &hw->txrx_stop_req_bit);
	}

	if (reset_flag || adapter->force_reset) {
		result = fjes_hw_reset(hw);

		adapter->force_reset = false;

		if (result)
			adapter->open_guard = true;

		hw->hw_info.buffer_share_bit = 0;

		memset((void *)&param, 0, sizeof(param));

		param.req_len = hw->hw_info.req_buf_size;
		param.req_start = __pa(hw->hw_info.req_buf);
		param.res_len = hw->hw_info.res_buf_size;
		param.res_start = __pa(hw->hw_info.res_buf);
		param.share_start = __pa(hw->hw_info.share->ep_status);

		fjes_hw_init_command_registers(hw, &param);
	}
}

/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}

/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int epidx;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	fjes_hw_raise_epstop(hw);

	napi_disable(&adapter->napi);

	spin_lock_irqsave(&hw->rx_status_lock, flags);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status &=
				~FJES_RX_POLL_WORK;
	}
	spin_unlock_irqrestore(&hw->rx_status_lock, flags);

	fjes_free_irq(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	adapter->unshare_watch_bitmask = 0;
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);

	fjes_hw_wait_epstop(hw);

	fjes_free_resources(adapter);

	return 0;
}

static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
			void *data, size_t len)
{
	int retval;

	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
					   data, len);
	if (retval)
		return retval;

	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
		FJES_TX_DELAY_SEND_PENDING;
	if (!work_pending(&adapter->raise_intr_rxdata_task))
		queue_work(adapter->txrx_wq,
			   &adapter->raise_intr_rxdata_task);

	retval = 0;
	return retval;
}
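
/* fjes_xmit_frame - transmit path: resolve the destination EPID(s) from the
 * MAC address and copy the frame into the shared TX buffer of each reachable
 * partner endpoint
 */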
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;
			ret = NETDEV_TX_OK;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;
		ret = NETDEV_TX_OK;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			if (!is_multi)
				hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_not_shared += 1;
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_ver_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_buf_size_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_vlanid_mismatch += 1;
			ret = NETDEV_TX_OK;
		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (
					   ((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					hw->ep_shm_info[dest_epid].ep_stats
							.tx_buffer_full += 1;
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				if (!is_multi) {
					adapter->stats64.tx_packets += 1;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_packets += 1;
					adapter->stats64.tx_bytes += len;
					hw->ep_shm_info[dest_epid].net_stats
							.tx_bytes += len;
				}

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}

static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);

	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
}
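
/* fjes_change_mtu - round the requested MTU up to the next supported size and
 * re-initialize the shared buffers while the interface is quiesced
 */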
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}

static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);

	netif_tx_wake_queue(queue);
}

static int fjes_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool ret = true;
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		if (!fjes_hw_check_vlan_id(
			&adapter->hw.ep_shm_info[epid].tx, vid))
			ret = fjes_hw_set_vlan_id(
				&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return ret ? 0 : -ENOSPC;
}

static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	int epid;

	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
		if (epid == adapter->hw.my_epid)
			continue;

		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
	}

	return 0;
}

static const struct net_device_ops fjes_netdev_ops = {
	.ndo_open		= fjes_open,
	.ndo_stop		= fjes_close,
	.ndo_start_xmit		= fjes_xmit_frame,
	.ndo_get_stats64	= fjes_get_stats64,
	.ndo_change_mtu		= fjes_change_mtu,
	.ndo_tx_timeout		= fjes_tx_retry,
	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fjes_vlan_rx_kill_vid,
};

/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
	ether_setup(netdev);

	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
	netdev->netdev_ops = &fjes_netdev_ops;
	fjes_set_ethtool_ops(netdev);
	netdev->mtu = fjes_support_mtu[3];
	netdev->min_mtu = fjes_support_mtu[0];
	netdev->max_mtu = fjes_support_mtu[3];
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}

static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
				     int start_epid)
{
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, cur_epid;
	int i;

	max_epid = hw->max_epid;
	start_epid = (start_epid + 1 + max_epid) % max_epid;

	for (i = 0; i < max_epid; i++) {
		cur_epid = (start_epid + i) % max_epid;
		if (cur_epid == hw->my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
		if (pstatus == EP_PARTNER_SHARED) {
			if (!fjes_hw_epbuf_rx_is_empty(
				&hw->ep_shm_info[cur_epid].rx))
				return cur_epid;
		}
	}
	return -1;
}

static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
			      int *cur_epid)
{
	void *frame;

	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
	if (*cur_epid < 0)
		return NULL;

	frame =
	fjes_hw_epbuf_rx_curpkt_get_addr(
		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);

	return frame;
}

static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
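
/* fjes_poll - NAPI poll: pull frames from the partners' shared RX rings,
 * hand them to the stack, and re-enable the RX interrupt once the rings
 * stay empty
 */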
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_errors += 1;
			} else {
				skb_put_data(skb, frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;

				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
						.multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}

static int fjes_sw_init(struct fjes_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_napi_add(netdev, &adapter->napi, fjes_poll);

	return 0;
}

static void fjes_force_close_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, force_close_task);
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}
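
/* fjes_tx_stall_task - wake the stalled TX queue once every sendable partner
 * ring has room again, or once the stall timeout has expired
 */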
static void fjes_tx_stall_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, tx_stall_task);
	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;
	int all_queue_available, sendable;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;
	union ep_buffer_info *info;
	int i;

	if (((long)jiffies -
		dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
		netif_wake_queue(netdev);
		return;
	}

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (i = 0; i < 5; i++) {
		all_queue_available = 1;

		for (epid = 0; epid < max_epid; epid++) {
			if (my_epid == epid)
				continue;

			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
			sendable = (pstatus == EP_PARTNER_SHARED);
			if (!sendable)
				continue;

			info = adapter->hw.ep_shm_info[epid].tx.info;

			if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
				return;

			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
					 info->v1i.count_max)) {
				all_queue_available = 0;
				break;
			}
		}

		if (all_queue_available) {
			netif_wake_queue(netdev);
			return;
		}
	}

	usleep_range(50, 100);

	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
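
/* fjes_raise_intr_rxdata_task - raise an RX-data interrupt toward every
 * shared partner that has a delayed send pending and is not already polling
 */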
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(work,
			struct fjes_adapter, raise_intr_rxdata_task);
	struct fjes_hw *hw = &adapter->hw;
	enum ep_partner_status pstatus;
	int max_epid, my_epid, epid;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	for (epid = 0; epid < max_epid; epid++)
		hw->ep_shm_info[epid].tx_status_work = 0;

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if (pstatus == EP_PARTNER_SHARED) {
			hw->ep_shm_info[epid].tx_status_work =
				hw->ep_shm_info[epid].tx.info->v1i.tx_status;

			if (hw->ep_shm_info[epid].tx_status_work ==
			    FJES_TX_DELAY_SEND_PENDING) {
				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
					FJES_TX_DELAY_SEND_NONE;
			}
		}
	}

	for (epid = 0; epid < max_epid; epid++) {
		if (epid == my_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
		if ((hw->ep_shm_info[epid].tx_status_work ==
		     FJES_TX_DELAY_SEND_PENDING) &&
		    (pstatus == EP_PARTNER_SHARED) &&
		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
		      FJES_RX_POLL_WORK)) {
			fjes_hw_raise_interrupt(hw, epid,
						REG_ICTL_MASK_RX_DATA);
			hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
		}
	}

	usleep_range(500, 1000);
}
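
/* fjes_watch_unshare_task - unregister shared buffers for endpoints that have
 * stopped sharing, scheduling a forced close/reset when the hardware command
 * fails
 */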
static void fjes_watch_unshare_task(struct work_struct *work)
{
	struct fjes_adapter *adapter =
	container_of(work, struct fjes_adapter, unshare_watch_task);

	struct net_device *netdev = adapter->netdev;
	struct fjes_hw *hw = &adapter->hw;

	int unshare_watch, unshare_reserve;
	int max_epid, my_epid, epidx;
	int stop_req, stop_req_done;
	ulong unshare_watch_bitmask;
	unsigned long flags;
	int wait_time = 0;
	int is_shared;
	int ret;

	my_epid = hw->my_epid;
	max_epid = hw->max_epid;

	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
	adapter->unshare_watch_bitmask = 0;

	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
	       (wait_time < 3000)) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
							   epidx);

			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);

			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
					FJES_RX_STOP_REQ_DONE;

			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);

			unshare_reserve = test_bit(epidx,
						   &hw->hw_info.buffer_unshare_reserve_bit);

			if ((!stop_req ||
			     (is_shared && (!is_shared || !stop_req_done))) &&
			    (is_shared || !unshare_watch || !unshare_reserve))
				continue;

			mutex_lock(&hw->hw_info.lock);
			ret = fjes_hw_unregister_buff_addr(hw, epidx);
			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(
					&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
			hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			clear_bit(epidx, &hw->txrx_stop_req_bit);
			clear_bit(epidx, &unshare_watch_bitmask);
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
		}

		msleep(100);
		wait_time += 100;
	}

	if (hw->hw_info.buffer_unshare_reserve_bit) {
		for (epidx = 0; epidx < max_epid; epidx++) {
			if (epidx == my_epid)
				continue;

			if (test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				mutex_lock(&hw->hw_info.lock);

				ret = fjes_hw_unregister_buff_addr(hw, epidx);
				switch (ret) {
				case 0:
					break;
				case -ENOMSG:
				case -EBUSY:
				default:
					if (!work_pending(
						&adapter->force_close_task)) {
						adapter->force_reset = true;
						schedule_work(
							&adapter->force_close_task);
					}
					break;
				}
				mutex_unlock(&hw->hw_info.lock);

				hw->ep_shm_info[epidx].ep_stats
					.com_unregist_buf_exec += 1;

				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epidx, &hw->txrx_stop_req_bit);
				clear_bit(epidx, &unshare_watch_bitmask);
				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
			}

			if (test_bit(epidx, &unshare_watch_bitmask)) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
						~FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}
	}
}
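
/* fjes_irq_watch_task - periodic safety net: re-run the interrupt handler and
 * kick NAPI in case a hardware interrupt was missed
 */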
static void fjes_irq_watch_task(struct work_struct *work)
{
	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
			struct fjes_adapter, interrupt_watch_task);

	local_irq_disable();
	fjes_intr(adapter->hw.hw_res.irq, adapter);
	local_irq_enable();

	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
		napi_schedule(&adapter->napi);

	if (adapter->interrupt_watch_enable) {
		if (!delayed_work_pending(&adapter->interrupt_watch_task))
			queue_delayed_work(adapter->control_wq,
					   &adapter->interrupt_watch_task,
					   FJES_IRQ_WATCH_DELAY);
	}
}

/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
	struct fjes_adapter *adapter;
	struct net_device *netdev;
	struct resource *res;
	struct fjes_hw *hw;
	u8 addr[ETH_ALEN];
	int err;

	err = -ENOMEM;
	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
				 NET_NAME_UNKNOWN, fjes_netdev_setup,
				 FJES_MAX_QUEUES);

	if (!netdev)
		goto err_out;

	SET_NETDEV_DEV(netdev, &plat_dev->dev);

	dev_set_drvdata(&plat_dev->dev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->plat_dev = plat_dev;
	hw = &adapter->hw;
	hw->back = adapter;

	/* setup the private structure */
	err = fjes_sw_init(adapter);
	if (err)
		goto err_free_netdev;

	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
	adapter->force_reset = false;
	adapter->open_guard = false;

	adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->txrx_wq)) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
					      WQ_MEM_RECLAIM, 0);
	if (unlikely(!adapter->control_wq)) {
		err = -ENOMEM;
		goto err_free_txrx_wq;
	}

	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
	INIT_WORK(&adapter->raise_intr_rxdata_task,
		  fjes_raise_intr_rxdata_task);
	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
	adapter->unshare_watch_bitmask = 0;

	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
	adapter->interrupt_watch_enable = false;

	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -EINVAL;
		goto err_free_control_wq;
	}
	hw->hw_res.start = res->start;
	hw->hw_res.size = resource_size(res);
	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
	if (hw->hw_res.irq < 0) {
		err = hw->hw_res.irq;
		goto err_free_control_wq;
	}

	err = fjes_hw_init(&adapter->hw);
	if (err)
		goto err_free_control_wq;

	/* setup MAC address (02:00:00:00:00:[epid])*/
	addr[0] = 2;
	addr[1] = 0;
	addr[2] = 0;
	addr[3] = 0;
	addr[4] = 0;
	addr[5] = hw->my_epid; /* EPID */
	eth_hw_addr_set(netdev, addr);

	err = register_netdev(netdev);
	if (err)
		goto err_hw_exit;

	netif_carrier_off(netdev);

	fjes_dbg_adapter_init(adapter);

	return 0;

err_hw_exit:
	fjes_hw_exit(&adapter->hw);
err_free_control_wq:
	destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
	destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
	free_netdev(netdev);
err_out:
	return err;
}

/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	fjes_dbg_adapter_exit(adapter);

	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
	cancel_work_sync(&adapter->unshare_watch_task);
	cancel_work_sync(&adapter->raise_intr_rxdata_task);
	cancel_work_sync(&adapter->tx_stall_task);
	if (adapter->control_wq)
		destroy_workqueue(adapter->control_wq);
	if (adapter->txrx_wq)
		destroy_workqueue(adapter->txrx_wq);

	unregister_netdev(netdev);

	fjes_hw_exit(hw);

	netif_napi_del(&adapter->napi);

	free_netdev(netdev);

	return 0;
}

static struct platform_driver fjes_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = fjes_probe,
	.remove = fjes_remove,
};

static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
				 void *context, void **return_value)
{
	struct acpi_device *device;
	bool *found = context;

	device = acpi_fetch_acpi_dev(obj_handle);
	if (!device)
		return AE_OK;

	if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
		return AE_OK;

	if (!is_extended_socket_device(device))
		return AE_OK;

	if (acpi_check_extended_socket_status(device))
		return AE_OK;

	*found = true;
	return AE_CTRL_TERMINATE;
}

/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
	bool found = false;
	int result;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
			    acpi_find_extended_socket_device, NULL, &found,
			    NULL);

	if (!found)
		return -ENODEV;

	pr_info("%s - version %s - %s\n",
		fjes_driver_string, fjes_driver_version, fjes_copyright);

	fjes_dbg_init();

	result = platform_driver_register(&fjes_driver);
	if (result < 0) {
		fjes_dbg_exit();
		return result;
	}

	result = acpi_bus_register_driver(&fjes_acpi_driver);
	if (result < 0)
		goto fail_acpi_driver;

	return 0;

fail_acpi_driver:
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
	return result;
}

module_init(fjes_init_module);

/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
	acpi_bus_unregister_driver(&fjes_acpi_driver);
	platform_driver_unregister(&fjes_driver);
	fjes_dbg_exit();
}

module_exit(fjes_exit_module);