// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_tmpl.h"

#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))
#define INVALID_ENTRY	((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)

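/*
 * The helpers below implement the template's two-pass capture scheme:
 * each helper advances *len unconditionally but writes only when buf is
 * non-NULL, so a first pass with buf == NULL (see
 * qla27xx_fwdt_calculate_dump_size()) computes the required dump size
 * and a second pass fills the buffer.
 */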
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = rd_reg_byte(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = rd_reg_word(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = rd_reg_dword(window);
	}
	qla27xx_insert32(value, buf, len);
}

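/*
 * Map the template's register width to the matching read helper:
 * 1 selects byte reads, 2 selects word reads, anything else dword
 * reads. Every value is still recorded as 32 bits in the dump.
 */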
static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	if (buf) {
		void __iomem *window = (void __iomem *)reg + offset;

		wrt_reg_dword(window, data);
	}
}

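/*
 * Capture a run of registers through the PCI I/O window: addr is
 * written to the iobase address register to select the window, then
 * count registers of the given width are read back starting at the
 * given PCI offset, each preceded by its address in the dump.
 */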
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBAR(reg), addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

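/*
 * Template entries are variable length; hdr.size holds the full size
 * of the current entry, so advancing by it yields the next entry.
 */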
static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + le32_to_cpu(ent->hdr.size);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t256.base_addr);
	uint offset = ent->t256.pci_offset;
	ulong count = le16_to_cpu(ent->t256.reg_count);
	uint width = ent->t256.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t257.base_addr);
	uint offset = ent->t257.pci_offset;
	ulong data = le32_to_cpu(ent->t257.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint banksel = ent->t258.banksel_offset;
	ulong bank = le32_to_cpu(ent->t258.bank);
	ulong addr = le32_to_cpu(ent->t258.base_addr);
	uint offset = ent->t258.pci_offset;
	uint count = le16_to_cpu(ent->t258.reg_count);
	uint width = ent->t258.reg_width;

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t259.base_addr);
	uint banksel = ent->t259.banksel_offset;
	ulong bank = le32_to_cpu(ent->t259.bank);
	uint offset = ent->t259.pci_offset;
	ulong data = le32_to_cpu(ent->t259.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf);
	qla27xx_write_reg(ISPREG(vha), banksel, bank, buf);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t260.pci_offset;

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(offset, buf, len);
	qla27xx_read_reg(ISPREG(vha), offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t261.pci_offset;
	ulong data = le32_to_cpu(ent->t261.write_data);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

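/*
 * Dump a firmware RAM area. The start/end range comes from the
 * template entry but, for the external, shared and DDR areas, is
 * overridden with the live bounds from qla_hw_data; the patched bounds
 * are written back into the captured entry. The range is inclusive:
 * dwords = end - start + 1.
 */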
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint area = ent->t262.ram_area;
	ulong start = le32_to_cpu(ent->t262.start_addr);
	ulong end = le32_to_cpu(ent->t262.end_addr);
	ulong dwords;
	int rc;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);

	if (area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = cpu_to_le32(end);
	} else if (area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else if (area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = cpu_to_le32(start);
			ent->t262.end_addr = cpu_to_le32(end);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%lx end=%lx)\n",
		    __func__, start, end);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
			    __func__, area, start, end);
			return INVALID_ENTRY;
		}
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

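/*
 * Snapshot request/response/ATIO rings. For each queue the dump
 * records a 16-bit queue id, a 16-bit entry count and the raw ring
 * contents; absent queues still contribute their worst-case size on
 * the sizing pass (buf == NULL) so the dump buffer is large enough.
 */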
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = ent->t263.queue_type;
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, type, *len);
	if (type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(ISPREG(vha), vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf) {
		if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
			ql_dbg(ql_dbg_async, vha, 0x5001,
			    "%s: unable to soft reset\n", __func__);
			return INVALID_ENTRY;
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint offset = ent->t267.pci_offset;
	ulong data = le32_to_cpu(ent->t267.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), offset, data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
				    vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
				    vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver; shadow pointers are used instead.
		 * Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

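/*
 * Emit a fixed scratch record: four constant marker words followed by
 * the running dump length; presumably the constants make this record
 * easy to locate in a raw dump.
 */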
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

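/*
 * Remote register read: window 0x40 is selected via the iobase address
 * register, then each dword is fetched by writing the target address
 * (with the high bit set) to offset 0xc0 and reading the result back
 * from offset 0xc4; 0xc0/0xc4 appear to act as the address/data ports
 * of an indirect access window.
 */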
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t270.addr);
	ulong dwords = le32_to_cpu(ent->t270.count);

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(ISPREG(vha), 0xc0, addr | 0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong addr = le32_to_cpu(ent->t271.addr);
	ulong data = le32_to_cpu(ent->t271.data);

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf);
	qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t272.count);
	ulong start = le32_to_cpu(ent->t272.addr);

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = le32_to_cpu(ent->t273.count);
	ulong addr = le32_to_cpu(ent->t273.addr);
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = ent->t274.queue_type;
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212,
	    "%s: getqsh(%lx) [%lx]\n", __func__, type, *len);
	if (type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %lx\n", __func__, type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

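/*
 * Copy a data buffer carried inline in the template entry itself into
 * the dump. A zero length skips the entry, and the declared length is
 * clamped so it cannot run past the end of the entry as given by
 * hdr.size.
 */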
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);
	ulong length = le32_to_cpu(ent->t275.length);
	ulong size = le32_to_cpu(ent->hdr.size);
	void *buffer = ent->t275.buffer;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213,
	    "%s: buffer(%lx) [%lx]\n", __func__, length, *len);
	if (!length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + length > size) {
		length = size - offset;
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow, truncate [%lx]\n", __func__, length);
		ent->t275.length = cpu_to_le32(length);
	}
	qla27xx_insertbuf(buffer, length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

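/*
 * Conditional entry: compare the device type (derived from the PCI
 * device id) and port number against the entry's cond1/cond2. On a
 * mismatch the *following* entry is skipped and the template's entry
 * count is decremented to keep the walk's bookkeeping consistent.
 */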
static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (buf) {
		ulong cond1 = le32_to_cpu(ent->t276.cond1);
		ulong cond2 = le32_to_cpu(ent->t276.cond2);
		uint type = vha->hw->pdev->device >> 4 & 0xf;
		uint func = vha->hw->port_no & 0x3;

		if (type != cond1 || func != cond2) {
			struct qla27xx_fwdt_template *tmp = buf;

			tmp->count--;
			ent = qla27xx_next_entry(ent);
			qla27xx_skip_entry(ent, buf);
		}
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t277.data_addr);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(wr_cmd_data, buf, len);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);
	qla27xx_read_reg(ISPREG(vha), data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr);
	ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data);
	ulong data_addr = le32_to_cpu(ent->t278.data_addr);
	ulong wr_data = le32_to_cpu(ent->t278.wr_data);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf);
	qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong type = le32_to_cpu(ent->hdr.type);

	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: other %lx [%lx]\n", __func__, type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL,	qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG,		qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG,		qla27xx_fwdt_entry_t278 },
	{ -1,				qla27xx_fwdt_entry_other }
};

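/*
 * Look up the handler for an entry type. The table above is sorted by
 * ascending type value, so the linear scan can stop at the first slot
 * whose type is >= the one requested; the -1 sentinel (which compares
 * as UINT_MAX against a uint) guarantees termination for unknown types.
 */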
static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

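/*
 * Walk every entry in the template, dispatching each to its handler.
 * The walk ends at the t255 end entry (handler returns NULL), when the
 * entry count is exhausted, or on a hard failure (INVALID_ENTRY), in
 * which case the dump length is reset to zero. The residual count is
 * converted to little-endian before returning.
 */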
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp +
	    le32_to_cpu(tmp->entry_offset);
	ulong type;

	tmp->count = le32_to_cpu(tmp->entry_count);
	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %u\n", __func__, tmp->count);
	while (ent && tmp->count--) {
		type = le32_to_cpu(ent->hdr.type);
		ent = qla27xx_find_entry(type)(vha, ent, buf, len);
		if (!ent)
			break;

		if (ent == INVALID_ENTRY) {
			*len = 0;
			ql_dbg(ql_dbg_async, vha, 0xffff,
			    "Unable to capture FW dump");
			goto bailout;
		}
	}

	if (tmp->count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry count residual=+%u\n", __func__, tmp->count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry\n", __func__);

bailout:
	cpu_to_le32s(&tmp->count); /* endianize residual count */
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = cpu_to_le32(jiffies);
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	WARN_ON_ONCE(sscanf(qla2x00_version_str,
	    "%hhu.%hhu.%hhu.%hhu",
	    v + 0, v + 1, v + 2, v + 3) != 4);

	tmp->driver_info[0] = cpu_to_le32(
	    v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
	tmp->driver_info[1] = cpu_to_le32(v[5] << 8 | v[4]);
	tmp->driver_info[2] = __constant_cpu_to_le32(0x12345678);
}

static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
	tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
	tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
	tmp->firmware_version[3] = cpu_to_le32(
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
	tmp->firmware_version[4] = cpu_to_le32(
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]);
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}

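/*
 * Fold-and-complement checksum: the template is summed as
 * little-endian dwords in a 64-bit accumulator, the carry bits above
 * 32 are folded back in, and the result is complemented. A template
 * whose checksum field holds the complement of the sum of its other
 * words folds to 0xffffffff, so this returns 0 for a valid template.
 */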
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	__le32 *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	for ( ; size--; buf++)
		sum += le32_to_cpu(*buf);

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp,
	    le32_to_cpu(tmp->template_size)) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP;
}

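/*
 * Run a validated template against the dump buffer: the template is
 * copied to the head of the buffer, stamped with time/driver/firmware
 * info, and then walked so the captured data lands directly after it.
 * Returns the dump length, or 0 if the template is invalid or the
 * capture failed.
 */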
static ulong
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf)
{
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = le32_to_cpu(tmp->template_size);
		tmp = memcpy(buf, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, buf, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
{
	struct qla27xx_fwdt_template *tmp = p;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = le32_to_cpu(tmp->template_size);
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return le32_to_cpu(tmp->template_size);
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__,
		    le32_to_cpu(tmp->template_type));
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

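/*
 * Capture an MPI (fwdt[1]) firmware dump, taking the hardware lock
 * unless the caller already holds it. If an MPI dump has already been
 * saved, the template is still executed, but into the spare area past
 * the primary buffer, and the saved dump and its length are left
 * untouched.
 */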
void
qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	if (!vha->hw->mpi_fw_dump) {
		ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
	} else {
		struct fwdt *fwdt = &vha->hw->fwdt[1];
		ulong len;
		void *buf = vha->hw->mpi_fw_dump;
		bool walk_template_only = false;

		if (vha->hw->mpi_fw_dumped) {
			/* Use the spare area for any further dumps. */
			buf += fwdt->dump_size;
			walk_template_only = true;
			ql_log(ql_log_warn, vha, 0x02f4,
			    "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
			    buf);
		}

		ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x02f6,
			    "-> fwdt1 no template\n");
			goto bailout;
		}
		len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
		if (len == 0) {
			goto bailout;
		} else if (len != fwdt->dump_size) {
			ql_log(ql_log_warn, vha, 0x02f7,
			    "-> fwdt1 fwdump residual=%+ld\n",
			    fwdt->dump_size - len);
		}
		vha->hw->stat.num_mpi_reset++;
		if (walk_template_only)
			goto bailout;

		vha->hw->mpi_fw_dump_len = len;
		vha->hw->mpi_fw_dumped = 1;

		ql_log(ql_log_warn, vha, 0x02f8,
		    "-> MPI firmware dump saved to buffer (%lu/%p)\n",
		    vha->host_no, vha->hw->mpi_fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}

bailout:
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}

void
qla27xx_fwdump(scsi_qla_host_t *vha)
{
	lockdep_assert_held(&vha->hw->hardware_lock);

	if (!vha->hw->fw_dump) {
		ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
	} else if (vha->hw->fw_dumped) {
		ql_log(ql_log_warn, vha, 0xd01f,
		    "-> Firmware already dumped (%p) -- ignoring request\n",
		    vha->hw->fw_dump);
	} else {
		struct fwdt *fwdt = vha->hw->fwdt;
		ulong len;
		void *buf = vha->hw->fw_dump;

		ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0xd012,
			    "-> fwdt0 no template\n");
			return;
		}
		len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
		if (len == 0) {
			return;
		} else if (len != fwdt->dump_size) {
			ql_log(ql_log_warn, vha, 0xd013,
			    "-> fwdt0 fwdump residual=%+ld\n",
			    fwdt->dump_size - len);
		}

		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = true;

		ql_log(ql_log_warn, vha, 0xd015,
		    "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
		    vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}