fnic_fcs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
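
/*
 * fnic_handle_link() - work handler for link events.
 *
 * Reads the current link status, link-down count and port speed from the
 * vNIC, updates the FC host speed, and notifies the FCoE controller of
 * UP/DOWN transitions. When the adapter is FIP-capable, a link-up
 * transition starts FCoE VLAN discovery instead of an immediate link-up
 * notification.
 */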
void fnic_handle_link(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, link_work);
        unsigned long flags;
        int old_link_status;
        u32 old_link_down_cnt;
        u64 old_port_speed, new_port_speed;

        spin_lock_irqsave(&fnic->fnic_lock, flags);

        fnic->link_events = 1;  /* less work to just set every time */

        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        old_link_down_cnt = fnic->link_down_cnt;
        old_link_status = fnic->link_status;
        old_port_speed = atomic64_read(
                        &fnic->fnic_stats.misc_stats.current_port_speed);

        fnic->link_status = vnic_dev_link_status(fnic->vdev);
        fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

        new_port_speed = vnic_dev_port_speed(fnic->vdev);
        atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
                     new_port_speed);
        if (old_port_speed != new_port_speed)
                FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
                              "Current vnic speed set to: %llu\n",
                              new_port_speed);

        switch (vnic_dev_port_speed(fnic->vdev)) {
        case DCEM_PORTSPEED_10G:
                fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
                fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
                break;
        case DCEM_PORTSPEED_20G:
                fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
                fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
                break;
        case DCEM_PORTSPEED_25G:
                fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
                fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
                break;
        case DCEM_PORTSPEED_40G:
        case DCEM_PORTSPEED_4x10G:
                fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
                fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
                break;
        case DCEM_PORTSPEED_100G:
                fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
                fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
                break;
        default:
                fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
                fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
                break;
        }

        if (old_link_status == fnic->link_status) {
                if (!fnic->link_status) {
                        /* DOWN -> DOWN */
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        fnic_fc_trace_set_data(fnic->lport->host->host_no,
                                               FNIC_FC_LE,
                                               "Link Status: DOWN->DOWN",
                                               strlen("Link Status: DOWN->DOWN"));
                } else {
                        if (old_link_down_cnt != fnic->link_down_cnt) {
                                /* UP -> DOWN -> UP */
                                fnic->lport->host_stats.link_failure_count++;
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                fnic_fc_trace_set_data(
                                        fnic->lport->host->host_no,
                                        FNIC_FC_LE,
                                        "Link Status:UP_DOWN_UP",
                                        strlen("Link Status:UP_DOWN_UP"));
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                                        /* start FCoE VLAN discovery */
                                        fnic_fc_trace_set_data(
                                                fnic->lport->host->host_no,
                                                FNIC_FC_LE,
                                                "Link Status: UP_DOWN_UP_VLAN",
                                                strlen("Link Status: UP_DOWN_UP_VLAN"));
                                        fnic_fcoe_send_vlan_req(fnic);
                                        return;
                                }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
                        } else {
                                /* UP -> UP */
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                                fnic_fc_trace_set_data(
                                        fnic->lport->host->host_no, FNIC_FC_LE,
                                        "Link Status: UP_UP",
                                        strlen("Link Status: UP_UP"));
                        }
                }
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        /* start FCoE VLAN discovery */
                        fnic_fc_trace_set_data(
                                fnic->lport->host->host_no,
                                FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
                                strlen("Link Status: DOWN_UP_VLAN"));
                        fnic_fcoe_send_vlan_req(fnic);
                        return;
                }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
                                       "Link Status: DOWN_UP",
                                       strlen("Link Status: DOWN_UP"));
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
                /* UP -> DOWN */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
                fnic_fc_trace_set_data(
                        fnic->lport->host->host_no, FNIC_FC_LE,
                        "Link Status: UP_DOWN",
                        strlen("Link Status: UP_DOWN"));
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "deleting fip-timer during link-down\n");
                        del_timer_sync(&fnic->fip_timer);
                }
                fcoe_ctlr_link_down(&fnic->ctlr);
        }
}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, frame_work);
        struct fc_lport *lp = fnic->lport;
        unsigned long flags;
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->frame_queue))) {

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                fp = (struct fc_frame *)skb;

                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                fc_exch_recv(lp, fp);
        }
}
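
/*
 * fnic_fcoe_evlist_free() - free all pending events on the fnic event list.
 * Called with no locks held; takes fnic_lock internally.
 */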
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
        struct fnic_event *fevt = NULL;
        struct fnic_event *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (list_empty(&fnic->evlist)) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
                list_del(&fevt->list);
                kfree(fevt);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
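
/*
 * fnic_handle_event() - work handler for queued fnic events.
 *
 * Drains fnic->evlist under fnic_lock and dispatches each event:
 * FNIC_EVT_START_VLAN_DISC sends a FIP VLAN request (with the lock
 * dropped), FNIC_EVT_START_FCF_DISC starts FCF discovery. Processing
 * stops early if link events are being shut down or the fnic is in a
 * transitional state.
 */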
void fnic_handle_event(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, event_work);
        struct fnic_event *fevt = NULL;
        struct fnic_event *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (list_empty(&fnic->evlist)) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }

        list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
                if (fnic->stop_rx_link_events) {
                        list_del(&fevt->list);
                        kfree(fevt);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }

                list_del(&fevt->list);
                switch (fevt->event) {
                case FNIC_EVT_START_VLAN_DISC:
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        fnic_fcoe_send_vlan_req(fnic);
                        spin_lock_irqsave(&fnic->fnic_lock, flags);
                        break;
                case FNIC_EVT_START_FCF_DISC:
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Start FCF Discovery\n");
                        fnic_fcoe_start_fcf_disc(fnic);
                        break;
                default:
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "Unknown event 0x%x\n", fevt->event);
                        break;
                }
                kfree(fevt);
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is a reject.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with an unsupported command
 * and an insufficient-resources ELS explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
                                           struct sk_buff *skb)
{
        struct fc_lport *lport = fip->lp;
        struct fip_header *fiph;
        struct fc_frame_header *fh = NULL;
        struct fip_desc *desc;
        struct fip_encaps *els;
        u16 op;
        u8 els_op;
        u8 sub;
        size_t rlen;
        size_t dlen = 0;

        if (skb_linearize(skb))
                return 0;

        if (skb->len < sizeof(*fiph))
                return 0;

        fiph = (struct fip_header *)skb->data;
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;

        if (op != FIP_OP_LS)
                return 0;

        if (sub != FIP_SC_REP)
                return 0;

        rlen = ntohs(fiph->fip_dl_len) * 4;
        if (rlen + sizeof(*fiph) > skb->len)
                return 0;

        desc = (struct fip_desc *)(fiph + 1);
        dlen = desc->fip_dlen * FIP_BPW;

        if (desc->fip_dtype == FIP_DT_FLOGI) {

                if (dlen < sizeof(*els) + sizeof(*fh) + 1)
                        return 0;

                els = (struct fip_encaps *)desc;
                fh = (struct fc_frame_header *)(els + 1);

                if (!fh)
                        return 0;

                /*
                 * ELS command code, reason and explanation should be = Reject,
                 * unsupported command and insufficient resource
                 */
                els_op = *(u8 *)(fh + 1);
                if (els_op == ELS_LS_RJT) {
                        shost_printk(KERN_INFO, lport->host,
                                     "Flogi Request Rejected by Switch\n");
                        return 1;
                }
                shost_printk(KERN_INFO, lport->host,
                             "Flogi Request Accepted by Switch\n");
        }
        return 0;
}
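
/*
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 *
 * Resets any previously discovered VLANs, builds a FIP VLAN request
 * (MAC and WWNN descriptors) addressed to the ALL-FCF-MACs group, hands
 * it to the FCoE controller's send routine, and arms the FIP timer so
 * the request is retried if no response arrives.
 */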
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct sk_buff *skb;
        char *eth_fr;
        struct fip_vlan *vlan;
        u64 vlan_tov;

        fnic_fcoe_reset_vlans(fnic);
        fnic->set_vlan(fnic, 0);

        if (printk_ratelimit())
                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                             "Sending VLAN request...\n");

        skb = dev_alloc_skb(sizeof(struct fip_vlan));
        if (!skb)
                return;

        eth_fr = (char *)skb->data;
        vlan = (struct fip_vlan *)eth_fr;

        memset(vlan, 0, sizeof(*vlan));
        memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
        memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
        vlan->eth.h_proto = htons(ETH_P_FIP);

        vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
        vlan->fip.fip_op = htons(FIP_OP_VLAN);
        vlan->fip.fip_subcode = FIP_SC_VL_REQ;
        vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

        vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
        vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
        memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

        vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
        vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
        put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);

        atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

        skb_put(skb, sizeof(*vlan));
        skb->protocol = htons(ETH_P_FIP);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);

        /* set a timer so that we can retry if there is no response */
        vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
        mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
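
/*
 * fnic_fcoe_process_vlan_resp() - handle a FIP VLAN notification.
 *
 * Parses the VLAN descriptors in the response into fnic->vlans, selects
 * the first VLAN for solicitation and restarts the FIP timer. If the
 * response carries no usable VLAN (or allocation fails), discovery is
 * retried from the timer.
 */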
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fip_header *fiph;
        struct fip_desc *desc;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u16 vid;
        size_t rlen;
        size_t dlen;
        struct fcoe_vlan *vlan;
        u64 sol_time;
        unsigned long flags;

        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Received VLAN response...\n");

        fiph = (struct fip_header *)skb->data;

        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
                     ntohs(fiph->fip_op), fiph->fip_subcode);

        rlen = ntohs(fiph->fip_dl_len) * 4;
        fnic_fcoe_reset_vlans(fnic);
        spin_lock_irqsave(&fnic->vlans_lock, flags);
        desc = (struct fip_desc *)(fiph + 1);
        while (rlen > 0) {
                dlen = desc->fip_dlen * FIP_BPW;
                switch (desc->fip_dtype) {
                case FIP_DT_VLAN:
                        vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
                        shost_printk(KERN_INFO, fnic->lport->host,
                                     "process_vlan_resp: FIP VLAN %d\n", vid);
                        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
                        if (!vlan) {
                                /* retry from timer */
                                spin_unlock_irqrestore(&fnic->vlans_lock,
                                                       flags);
                                goto out;
                        }
                        vlan->vid = vid & 0x0fff;
                        vlan->state = FIP_VLAN_AVAIL;
                        list_add_tail(&vlan->list, &fnic->vlans);
                        break;
                }
                desc = (struct fip_desc *)((char *)desc + dlen);
                rlen -= dlen;
        }

        /* any VLAN descriptors present? */
        if (list_empty(&fnic->vlans)) {
                /* retry from timer */
                atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                             "No VLAN descriptors in FIP VLAN response\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                goto out;
        }

        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        fnic->set_vlan(fnic, vlan->vid);
        vlan->state = FIP_VLAN_SENT;    /* sent now */
        vlan->sol_count++;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);

        /* start the solicitation */
        fcoe_ctlr_link_up(fip);

        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
        return;
}
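
/*
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the first
 * discovered VLAN: program the VLAN, mark it as solicited, signal
 * link-up to the FCoE controller and arm the FIP timer.
 */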
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        u64 sol_time;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        fnic->set_vlan(fnic, vlan->vid);
        vlan->state = FIP_VLAN_SENT;    /* sent now */
        vlan->sol_count = 1;
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);

        /* start the solicitation */
        fcoe_ctlr_link_up(&fnic->ctlr);

        sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
        mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
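
/*
 * fnic_fcoe_vlan_check() - validate the current VLAN on an FCF advertisement.
 *
 * Returns 0 if the first VLAN on the list is already in use, or moves it
 * from SENT to USED; returns -EINVAL if no usable VLAN exists. The @flag
 * argument (FIP flags from the advertisement) is currently unused.
 */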
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
        unsigned long flags;
        struct fcoe_vlan *fvlan;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (list_empty(&fnic->vlans)) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return -EINVAL;
        }

        fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        if (fvlan->state == FIP_VLAN_USED) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return 0;
        }

        if (fvlan->state == FIP_VLAN_SENT) {
                fvlan->state = FIP_VLAN_USED;
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
        return -EINVAL;
}
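
/*
 * fnic_event_enq() - queue an event for fnic_handle_event().
 * Allocates the event with GFP_ATOMIC (callers may be in atomic
 * context), appends it to fnic->evlist and schedules the event work.
 */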
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
        struct fnic_event *fevt;
        unsigned long flags;

        fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
        if (!fevt)
                return;

        fevt->fnic = fnic;
        fevt->event = ev;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        list_add_tail(&fevt->list, &fnic->evlist);
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        schedule_work(&fnic->event_work);
}
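
/*
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 *
 * Validates the FIP header, checks FCF advertisements against the
 * current VLAN, consumes VLAN notifications, and restarts VLAN
 * discovery on a clear-virtual-link request. Returns 0 if the frame was
 * consumed here (VLAN response), -1 on bad input, and 1 otherwise (the
 * caller then passes the frame on to libfcoe).
 */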
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
        struct fip_header *fiph;
        int ret = 1;
        u16 op;
        u8 sub;

        if (!skb || !(skb->data))
                return -1;

        if (skb_linearize(skb))
                goto drop;

        fiph = (struct fip_header *)skb->data;
        op = ntohs(fiph->fip_op);
        sub = fiph->fip_subcode;

        if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
                goto drop;

        if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
                goto drop;

        if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
                if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
                        goto drop;
                /* pass it on to fcoe */
                ret = 1;
        } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
                /* set the vlan as used */
                fnic_fcoe_process_vlan_resp(fnic, skb);
                ret = 0;
        } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
                /* received CVL request, restart vlan disc */
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                /* pass it on to fcoe */
                ret = 1;
        }
drop:
        return ret;
}
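
/*
 * fnic_handle_fip_frame() - work handler for queued FIP frames.
 *
 * Drains fnic->fip_frame_queue, runs each FIP frame through
 * fnic_fcoe_handle_fip_frame(), and on a FLOGI reject from the switch
 * forces a link-down and restarts VLAN discovery; all other FIP frames
 * are handed to fcoe_ctlr_recv().
 */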
void fnic_handle_fip_frame(struct work_struct *work)
{
        struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        struct sk_buff *skb;
        struct ethhdr *eh;

        while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->stop_rx_link_events) {
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        dev_kfree_skb(skb);
                        return;
                }
                /*
                 * If we're in a transitional state, just re-queue and return.
                 * The queue will be serviced when we get to a stable state.
                 */
                if (fnic->state != FNIC_IN_FC_MODE &&
                    fnic->state != FNIC_IN_ETH_MODE) {
                        skb_queue_head(&fnic->fip_frame_queue, skb);
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                eh = (struct ethhdr *)skb->data;
                if (eh->h_proto == htons(ETH_P_FIP)) {
                        skb_pull(skb, sizeof(*eh));
                        if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
                                dev_kfree_skb(skb);
                                continue;
                        }
                        /*
                         * If there are FLOGI rejects - clear all
                         * fcf's & restart from scratch
                         */
                        if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
                                atomic64_inc(
                                        &fnic_stats->vlan_stats.flogi_rejects);
                                shost_printk(KERN_INFO, fnic->lport->host,
                                             "Trigger a Link down - VLAN Disc\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                /* start FCoE VLAN discovery */
                                fnic_fcoe_send_vlan_req(fnic);
                                dev_kfree_skb(skb);
                                continue;
                        }
                        fcoe_ctlr_recv(&fnic->ctlr, skb);
                        continue;
                }
        }
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
        struct fc_frame *fp;
        struct ethhdr *eh;
        struct fcoe_hdr *fcoe_hdr;
        struct fcoe_crc_eof *ft;

        /*
         * Undo VLAN encapsulation if present.
         */
        eh = (struct ethhdr *)skb->data;
        if (eh->h_proto == htons(ETH_P_8021Q)) {
                memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
                eh = skb_pull(skb, VLAN_HLEN);
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
                if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
                        printk(KERN_ERR "Dropped FIP frame, as firmware "
                                        "uses non-FIP mode, Enable FIP "
                                        "using UCSM\n");
                        goto drop;
                }
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
                skb_queue_tail(&fnic->fip_frame_queue, skb);
                queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
                goto drop;
        skb_set_network_header(skb, sizeof(*eh));
        skb_pull(skb, sizeof(*eh));

        fcoe_hdr = (struct fcoe_hdr *)skb->data;
        if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
                goto drop;

        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_sof(fp) = fcoe_hdr->fcoe_sof;
        skb_pull(skb, sizeof(struct fcoe_hdr));
        skb_reset_transport_header(skb);

        ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
        fr_eof(fp) = ft->fcoe_eof;
        skb_trim(skb, skb->len - sizeof(*ft));
        return 0;
drop:
        dev_kfree_skb_irq(skb);
        return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
        u8 *ctl = fnic->ctlr.ctl_src_addr;
        u8 *data = fnic->data_src_addr;

        if (is_zero_ether_addr(new))
                new = ctl;
        if (ether_addr_equal(data, new))
                return;
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
        if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
                vnic_dev_del_addr(fnic->vdev, data);
        memcpy(data, new, ETH_ALEN);
        if (!ether_addr_equal(new, ctl))
                vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
        struct fnic *fnic = lport_priv(lport);

        spin_lock_irq(&fnic->fnic_lock);
        fnic_update_mac_locked(fnic, new);
        spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lport);
        u8 *mac;
        int ret;

        FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
                     port_id, fp);

        /*
         * If we're clearing the FC_ID, change to use the ctl_src_addr.
         * Set ethernet mode to send FLOGI.
         */
        if (!port_id) {
                fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
                fnic_set_eth_mode(fnic);
                return;
        }

        if (fp) {
                mac = fr_cb(fp)->granted_mac;
                if (is_zero_ether_addr(mac)) {
                        /* non-FIP - FLOGI already accepted - ignore return */
                        fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
                }
                fnic_update_mac(lport, mac);
        }

        /* Change state to reflect transition to FC mode */
        spin_lock_irq(&fnic->fnic_lock);
        if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
                fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
        else {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unexpected fnic state %s while"
                             " processing flogi resp\n",
                             fnic_state_to_str(fnic->state));
                spin_unlock_irq(&fnic->fnic_lock);
                return;
        }
        spin_unlock_irq(&fnic->fnic_lock);

        /*
         * Send FLOGI registration to firmware to set up FC mode.
         * The new address will be set up when registration completes.
         */
        ret = fnic_flogi_reg_handler(fnic, port_id);

        if (ret < 0) {
                spin_lock_irq(&fnic->fnic_lock);
                if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
                        fnic->state = FNIC_IN_ETH_MODE;
                spin_unlock_irq(&fnic->fnic_lock);
        }
}
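
/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive completion handler.
 *
 * Decodes the completion descriptor (FCP or Ethernet type), trims the
 * sk_buff to the bytes actually written, drops frames with FCS/CRC or
 * encapsulation errors, and queues good frames to fnic->frame_queue for
 * fnic_handle_frame(). Ethernet completions are first run through
 * fnic_import_rq_eth_pkt(), which may consume FIP frames.
 */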
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq,
                                    struct cq_desc *cq_desc,
                                    struct vnic_rq_buf *buf,
                                    int skipped __attribute__((unused)),
                                    void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        struct fc_frame *fp;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe = 0, fcoe_sof, fcoe_eof;
        u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
        u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
        u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
        u8 fcs_ok = 1, packet_error = 0;
        u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
        u32 rss_hash;
        u16 exchange_id, tmpl;
        u8 sof = 0;
        u8 eof = 0;
        u32 fcp_bytes_written = 0;
        unsigned long flags;

        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
                         DMA_FROM_DEVICE);
        skb = buf->os_buf;
        fp = (struct fc_frame *)skb;
        buf->os_buf = NULL;

        cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
        if (type == CQ_DESC_TYPE_RQ_FCP) {
                cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
                                   &type, &color, &q_number, &completed_index,
                                   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
                                   &tmpl, &fcp_bytes_written, &sof, &eof,
                                   &ingress_port, &packet_error,
                                   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
                                   &vlan);
                skb_trim(skb, fcp_bytes_written);
                fr_sof(fp) = sof;
                fr_eof(fp) = eof;
        } else if (type == CQ_DESC_TYPE_RQ_ENET) {
                cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
                                    &type, &color, &q_number, &completed_index,
                                    &ingress_port, &fcoe, &eop, &sop,
                                    &rss_type, &csum_not_calc, &rss_hash,
                                    &bytes_written, &packet_error,
                                    &vlan_stripped, &vlan, &checksum,
                                    &fcoe_sof, &fcoe_fc_crc_ok,
                                    &fcoe_enc_error, &fcoe_eof,
                                    &tcp_udp_csum_ok, &udp, &tcp,
                                    &ipv4_csum_ok, &ipv6, &ipv4,
                                    &ipv4_fragment, &fcs_ok);
                skb_trim(skb, bytes_written);
                if (!fcs_ok) {
                        atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "fcs error. dropping packet.\n");
                        goto drop;
                }
                if (fnic_import_rq_eth_pkt(fnic, skb))
                        return;
        } else {
                /* wrong CQ type */
                shost_printk(KERN_ERR, fnic->lport->host,
                             "fnic rq_cmpl wrong cq type x%x\n", type);
                goto drop;
        }

        if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
                atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "fnic rq_cmpl fcoe x%x fcsok x%x"
                             " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
                             " x%x\n",
                             fcoe, fcs_ok, packet_error,
                             fcoe_fc_crc_ok, fcoe_enc_error);
                goto drop;
        }

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                goto drop;
        }
        fr_dev(fp) = fnic->lport;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
                                    (char *)skb->data, skb->len)) != 0) {
                printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }

        skb_queue_tail(&fnic->frame_queue, skb);
        queue_work(fnic_event_queue, &fnic->frame_work);

        return;
drop:
        dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);

        vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
                        VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
                        NULL);
        return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
        unsigned int tot_rq_work_done = 0, cur_work_done;
        unsigned int i;
        int err;

        for (i = 0; i < fnic->rq_count; i++) {
                cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
                                                fnic_rq_cmpl_handler_cont,
                                                NULL);
                if (cur_work_done) {
                        err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
                        if (err)
                                shost_printk(KERN_ERR, fnic->lport->host,
                                             "fnic_alloc_rq_frame can't alloc"
                                             " frame\n");
                }
                tot_rq_work_done += cur_work_done;
        }

        return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        u16 len;
        dma_addr_t pa;
        int r;

        len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
        skb = dev_alloc_skb(len);
        if (!skb) {
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "Unable to allocate RQ sk_buff\n");
                return -ENOMEM;
        }
        skb_reset_mac_header(skb);
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        skb_put(skb, len);
        pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                r = -ENOMEM;
                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
                goto free_skb;
        }

        fnic_queue_rq_desc(rq, skb, pa, len);
        return 0;

free_skb:
        kfree_skb(skb);
        return r;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(rq->vdev);

        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
                         DMA_FROM_DEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
        struct fnic *fnic = fnic_from_ctlr(fip);
        struct vnic_wq *wq = &fnic->wq[0];
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        unsigned long flags;

        if (!fnic->vlan_hw_insert) {
                eth_hdr = (struct ethhdr *)skb_mac_header(skb);
                vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
                memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
        } else {
                if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
                        FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
                        printk(KERN_ERR "fnic ctlr frame trace error!!!");
                }
        }

        pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
                            DMA_TO_DEVICE);
        if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                printk(KERN_ERR "DMA mapping failed\n");
                goto free_skb;
        }

        spin_lock_irqsave(&fnic->wq_lock[0], flags);
        if (!vnic_wq_desc_avail(wq))
                goto irq_restore;

        fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
                               0 /* hw inserts cos value */,
                               fnic->vlan_id, 1);
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
        return;

irq_restore:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
        dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
        kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
        struct vnic_wq *wq = &fnic->wq[0];
        struct sk_buff *skb;
        dma_addr_t pa;
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        struct fcoe_hdr *fcoe_hdr;
        struct fc_frame_header *fh;
        u32 tot_len, eth_hdr_len;
        int ret = 0;
        unsigned long flags;

        fh = fc_frame_header_get(fp);
        skb = fp_skb(fp);

        if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
            fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
                return 0;

        if (!fnic->vlan_hw_insert) {
                eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
                vlan_hdr = skb_push(skb, eth_hdr_len);
                eth_hdr = (struct ethhdr *)vlan_hdr;
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
                fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
        } else {
                eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
                eth_hdr = skb_push(skb, eth_hdr_len);
                eth_hdr->h_proto = htons(ETH_P_FCOE);
                fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
        }

        if (fnic->ctlr.map_dest)
                fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
        else
                memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
        memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

        tot_len = skb->len;
        BUG_ON(tot_len % 4);

        memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
        fcoe_hdr->fcoe_sof = fr_sof(fp);
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

        pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                ret = -ENOMEM;
                printk(KERN_ERR "DMA map failed with error %d\n", ret);
                goto free_skb_on_err;
        }

        if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
                                    (char *)eth_hdr, tot_len)) != 0) {
                printk(KERN_ERR "fnic ctlr frame trace error!!!");
        }

        spin_lock_irqsave(&fnic->wq_lock[0], flags);

        if (!vnic_wq_desc_avail(wq)) {
                dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
                ret = -1;
                goto irq_restore;
        }

        fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
                           0 /* hw inserts cos value */,
                           fnic->vlan_id, 1, 1, 1);

irq_restore:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
        if (ret)
                dev_kfree_skb_any(fp_skb(fp));

        return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
        struct fnic *fnic = lport_priv(lp);
        unsigned long flags;

        if (fnic->in_remove) {
                dev_kfree_skb(fp_skb(fp));
                return -1;
        }

        /*
         * Queue frame if in a transitional state.
         * This occurs while registering the Port_ID / MAC address after FLOGI.
         */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
                skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return 0;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
        struct sk_buff *skb;
        struct fc_frame *fp;

        while ((skb = skb_dequeue(&fnic->tx_queue))) {
                fp = (struct fc_frame *)skb;
                fnic_send_frame(fnic, fp);
        }
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
        unsigned long flags;
        enum fnic_state old_state;
        int ret;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
        old_state = fnic->state;
        switch (old_state) {
        case FNIC_IN_FC_MODE:
        case FNIC_IN_ETH_TRANS_FC_MODE:
        default:
                fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);

                ret = fnic_fw_reset_handler(fnic);

                spin_lock_irqsave(&fnic->fnic_lock, flags);
                if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
                        goto again;
                if (ret)
                        fnic->state = old_state;
                break;

        case FNIC_IN_FC_TRANS_ETH_MODE:
        case FNIC_IN_ETH_MODE:
                break;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
                                        struct cq_desc *cq_desc,
                                        struct vnic_wq_buf *buf, void *opaque)
{
        struct sk_buff *skb = buf->os_buf;
        struct fc_frame *fp = (struct fc_frame *)skb;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
                         DMA_TO_DEVICE);
        dev_kfree_skb_irq(fp_skb(fp));
        buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
                                     struct cq_desc *cq_desc, u8 type,
                                     u16 q_number, u16 completed_index,
                                     void *opaque)
{
        struct fnic *fnic = vnic_dev_priv(vdev);
        unsigned long flags;

        spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
        vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
                        fnic_wq_complete_frame_send, NULL);
        spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

        return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
        unsigned int wq_work_done = 0;
        unsigned int i;

        for (i = 0; i < fnic->raw_wq_count; i++) {
                wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
                                                work_to_do,
                                                fnic_wq_cmpl_handler_cont,
                                                NULL);
        }

        return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);

        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
                         DMA_TO_DEVICE);

        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
}
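
/*
 * fnic_fcoe_reset_vlans() - discard all VLANs discovered so far.
 * Frees every entry on fnic->vlans under vlans_lock, so that VLAN
 * discovery starts from a clean list.
 */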
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        struct fcoe_vlan *next;

        /*
         * indicate a link down to fcoe so that all fcfs are freed;
         * might not be required since we did this before sending the vlan
         * discovery request
         */
        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (!list_empty(&fnic->vlans)) {
                list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
                        list_del(&vlan->list);
                        kfree(vlan);
                }
        }
        spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
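
/*
 * fnic_handle_fip_timer() - FIP timer expiry handler.
 *
 * Retries VLAN discovery when no VLANs are known or all have failed.
 * For a VLAN still in the SENT state it re-solicits, and after
 * FCOE_CTLR_MAX_SOL unanswered solicitations it drops that VLAN and
 * moves on to the next one, restarting discovery when the list is
 * exhausted.
 */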
void fnic_handle_fip_timer(struct fnic *fnic)
{
        unsigned long flags;
        struct fcoe_vlan *vlan;
        struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u64 sol_time;

        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->stop_rx_link_events) {
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);

        if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
                return;

        spin_lock_irqsave(&fnic->vlans_lock, flags);
        if (list_empty(&fnic->vlans)) {
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                /* no vlans available, try again */
                if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
                        if (printk_ratelimit())
                                shost_printk(KERN_DEBUG, fnic->lport->host,
                                             "Start VLAN Discovery\n");
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                return;
        }

        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                     "fip_timer: vlan %d state %d sol_count %d\n",
                     vlan->vid, vlan->state, vlan->sol_count);
        switch (vlan->state) {
        case FIP_VLAN_USED:
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "FIP VLAN is selected for FC transaction\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                break;
        case FIP_VLAN_FAILED:
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                /* if all vlans are in failed state, restart vlan disc */
                if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
                        if (printk_ratelimit())
                                shost_printk(KERN_DEBUG, fnic->lport->host,
                                             "Start VLAN Discovery\n");
                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                break;
        case FIP_VLAN_SENT:
                if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
                        /*
                         * no response on this vlan, remove from the list.
                         * Try the next vlan
                         */
                        FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                                     "Dequeue this VLAN ID %d from list\n",
                                     vlan->vid);
                        list_del(&vlan->list);
                        kfree(vlan);
                        vlan = NULL;
                        if (list_empty(&fnic->vlans)) {
                                /* we exhausted all vlans, restart vlan disc */
                                spin_unlock_irqrestore(&fnic->vlans_lock,
                                                       flags);
                                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                                             "fip_timer: vlan list empty, "
                                             "trigger vlan disc\n");
                                fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
                                return;
                        }
                        /* check the next vlan */
                        vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
                                                list);
                        fnic->set_vlan(fnic, vlan->vid);
                        vlan->state = FIP_VLAN_SENT;    /* sent now */
                }
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
                atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
                vlan->sol_count++;
                sol_time = jiffies +
                        msecs_to_jiffies(FCOE_CTLR_START_DELAY);
                mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
                break;
        }
}