ql4_83xx.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QLogic iSCSI HBA Driver
  4. * Copyright (c) 2003-2013 QLogic Corporation
  5. */
  6. #include <linux/ratelimit.h>
  7. #include "ql4_def.h"
  8. #include "ql4_version.h"
  9. #include "ql4_glbl.h"
  10. #include "ql4_dbg.h"
  11. #include "ql4_inline.h"
  12. uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
  13. {
  14. return readl((void __iomem *)(ha->nx_pcibase + addr));
  15. }
  16. void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
  17. {
  18. writel(val, (void __iomem *)(ha->nx_pcibase + addr));
  19. }
  20. static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
  21. {
  22. uint32_t val;
  23. int ret_val = QLA_SUCCESS;
  24. qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
  25. val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
  26. if (val != addr) {
  27. ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
  28. __func__, addr, val);
  29. ret_val = QLA_ERROR;
  30. }
  31. return ret_val;
  32. }
  33. int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
  34. uint32_t *data)
  35. {
  36. int ret_val;
  37. ret_val = qla4_83xx_set_win_base(ha, addr);
  38. if (ret_val == QLA_SUCCESS) {
  39. *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
  40. } else {
  41. *data = 0xffffffff;
  42. ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
  43. __func__, addr);
  44. }
  45. return ret_val;
  46. }
  47. int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
  48. uint32_t data)
  49. {
  50. int ret_val;
  51. ret_val = qla4_83xx_set_win_base(ha, addr);
  52. if (ret_val == QLA_SUCCESS)
  53. qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
  54. else
  55. ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
  56. __func__, addr, data);
  57. return ret_val;
  58. }
/*
 * Acquire the hardware flash semaphore shared by all PCI functions.
 *
 * Reading QLA83XX_FLASH_LOCK returns non-zero once the semaphore has
 * been granted to this reader.  Polls every 20 ms for up to
 * QLA83XX_FLASH_LOCK_TIMEOUT ms, then gives up and reports the current
 * holder from QLA83XX_FLASH_LOCK_ID.
 *
 * Returns QLA_SUCCESS when the lock was obtained, QLA_ERROR on timeout.
 */
static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;

	while (lock_status == 0) {
		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
			lock_owner = qla4_83xx_rd_reg(ha,
						      QLA83XX_FLASH_LOCK_ID);
			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
				   __func__, ha->func_num, lock_owner);
			ret_val = QLA_ERROR;
			break;
		}
		msleep(20);
	}

	/* NOTE(review): the owner ID is recorded even on the timeout path,
	 * overwriting the current holder's ID - confirm this is intended. */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
	return ret_val;
}
/* Release the hardware flash semaphore taken by qla4_83xx_flash_lock(). */
static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
{
	/* Mark the lock as owned by no function (0xFF) before releasing */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
}
  88. int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
  89. uint8_t *p_data, int u32_word_count)
  90. {
  91. int i;
  92. uint32_t u32_word;
  93. uint32_t addr = flash_addr;
  94. int ret_val = QLA_SUCCESS;
  95. ret_val = qla4_83xx_flash_lock(ha);
  96. if (ret_val == QLA_ERROR)
  97. goto exit_lock_error;
  98. if (addr & 0x03) {
  99. ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
  100. __func__, addr);
  101. ret_val = QLA_ERROR;
  102. goto exit_flash_read;
  103. }
  104. for (i = 0; i < u32_word_count; i++) {
  105. ret_val = qla4_83xx_wr_reg_indirect(ha,
  106. QLA83XX_FLASH_DIRECT_WINDOW,
  107. (addr & 0xFFFF0000));
  108. if (ret_val == QLA_ERROR) {
  109. ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!",
  110. __func__, addr);
  111. goto exit_flash_read;
  112. }
  113. ret_val = qla4_83xx_rd_reg_indirect(ha,
  114. QLA83XX_FLASH_DIRECT_DATA(addr),
  115. &u32_word);
  116. if (ret_val == QLA_ERROR) {
  117. ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
  118. __func__, addr);
  119. goto exit_flash_read;
  120. }
  121. *(__le32 *)p_data = le32_to_cpu(u32_word);
  122. p_data = p_data + 4;
  123. addr = addr + 4;
  124. }
  125. exit_flash_read:
  126. qla4_83xx_flash_unlock(ha);
  127. exit_lock_error:
  128. return ret_val;
  129. }
/**
 * qla4_83xx_lockless_flash_read_u32 - Read dwords from flash without
 * taking the flash semaphore (caller must guarantee exclusivity).
 * @ha: Pointer to adapter structure
 * @flash_addr: dword-aligned flash offset to start reading from
 * @p_data: destination buffer, at least u32_word_count * 4 bytes
 * @u32_word_count: number of 32-bit words to read
 *
 * The FLASH_DIRECT_WINDOW register is re-programmed each time the read
 * crosses a QLA83XX_FLASH_SECTOR_SIZE boundary; reads fully inside one
 * sector need only the initial window write.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	/* Offset of the start address within its flash sector */
	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	/* Flash accesses must be dword aligned */
	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			/* NOTE(review): le32_to_cpu stored through a __le32
			 * cast looks inverted - verify on big-endian. */
			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;

			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_WINDOW,
						addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}
/*
 * Force-release the flash semaphore during adapter reset, regardless of
 * which function currently owns it.
 */
void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);

	/*
	 * We got the lock, or someone else is holding the lock
	 * since we are resetting, forcefully unlock
	 */
	qla4_83xx_flash_unlock(ha);
}
/* IDC lock recovery handshake phases (bits 0-1 of DRV_LOCKRECOVERY) */
#define INTENT_TO_RECOVER	0x01
#define PROCEED_TO_RECOVER	0x02

/*
 * Recover the IDC driver lock when its current holder appears stuck.
 *
 * Two-phase protocol through QLA83XX_DRV_LOCKRECOVERY: advertise intent
 * (our function number in bits 2-5, INTENT_TO_RECOVER in bits 0-1), wait
 * 200 ms, confirm no other function overwrote the advertisement, then
 * declare PROCEED_TO_RECOVER and force-unlock.  Finally try to take the
 * lock for this function.
 *
 * Returns QLA_SUCCESS if the lock was recovered and is now held by this
 * function, QLA_ERROR otherwise.
 */
static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{
	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is advertised */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register*/
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		/* Bump the acquisition counter (bits 8-31) and record our
		 * function number as the owner (bits 0-7) */
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}
/* Poll interval while waiting for the IDC lock */
#define QLA83XX_DRV_LOCK_MSLEEP 200

/*
 * Acquire the inter-driver (IDC) lock.
 *
 * DRV_LOCK reads non-zero once ownership is granted.  DRV_LOCK_ID encodes
 * the owner: bits 0-7 function number, bits 8-31 acquisition counter.
 * If the very same owner (same counter, same function) still holds the
 * lock after QLA83XX_DRV_LOCK_TIMEOUT ms, it is presumed dead and lock
 * recovery is attempted; if ownership changed while waiting, the timeout
 * window simply restarts.
 */
int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int timeout = 0;
	uint32_t status = 0;
	int ret_val = QLA_SUCCESS;
	uint32_t first_owner = 0;
	uint32_t tmo_owner = 0;
	uint32_t lock_id;
	uint32_t func_num;
	uint32_t lock_cnt;

	while (status == 0) {
		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7) on
			 * getting a successful lock */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			/* Save counter + ID of function holding the lock for
			 * first failure */
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							  QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Some other driver got lock, OR same driver
				 * got lock again (counter value changed), when
				 * we were waiting for lock.
				 * Retry for another 2 sec */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got lock */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs and retry */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}
/*
 * Release the IDC lock, but only if this function actually owns it.
 * The acquisition counter in DRV_LOCK_ID is preserved; the owner byte is
 * set to 0xFF (no owner) before the DRV_UNLOCK read drops the lock.
 */
void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
{
	int id;

	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);

	if ((id & 0xFF) != ha->func_num) {
		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
			   __func__, ha->func_num, (id & 0xFF));
		return;
	}

	/* Keep lock counter value, update the ha->func_num to 0xFF */
	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
}
  327. void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
  328. {
  329. uint32_t idc_ctrl;
  330. idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
  331. idc_ctrl |= DONTRESET_BIT0;
  332. qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
  333. DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
  334. idc_ctrl));
  335. }
  336. void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
  337. {
  338. uint32_t idc_ctrl;
  339. idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
  340. idc_ctrl &= ~DONTRESET_BIT0;
  341. qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
  342. DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
  343. idc_ctrl));
  344. }
  345. int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
  346. {
  347. uint32_t idc_ctrl;
  348. idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
  349. return idc_ctrl & DONTRESET_BIT0;
  350. }
  351. /*-------------------------IDC State Machine ---------------------*/
/* Device classes encoded in the low 2 bits of each per-function nibble of
 * the DEV_PART_INFO registers */
enum {
	UNKNOWN_CLASS = 0,
	NIC_CLASS,
	FCOE_CLASS,
	ISCSI_CLASS
};

/* Decoded per-function entry built from the DEV_PART_INFO registers */
struct device_info {
	int func_num;		/* PCI function number (0-15) */
	int device_type;	/* one of the *_CLASS values above */
	int port_num;		/* physical port bits (upper 2 bits of nibble) */
};
/*
 * Decide whether this function should own the coming chip reset.
 *
 * Walks the 16 PCI functions described by the DEV_PART_INFO registers and
 * cross-checks DRV_ACTIVE to see which drivers are actually loaded.  NIC
 * takes precedence over iSCSI; among active iSCSI functions, the lowest
 * function number wins.
 *
 * Returns 1 when this function may perform the reset, 0 otherwise.
 */
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_active;
	uint32_t dev_part, dev_part1, dev_part2;
	int i;
	struct device_info device_map[16];
	int func_nibble;
	int nibble;
	int nic_present = 0;
	int iscsi_present = 0;
	int iscsi_func_low = 0;

	/* Use the dev_partition register to determine the PCI function number
	 * and then check drv_active register to see which driver is loaded */
	dev_part1 = qla4_83xx_rd_reg(ha,
				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);

	/* Each function has 4 bits in dev_partition Info register,
	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
	dev_part = dev_part1;
	for (i = nibble = 0; i <= 15; i++, nibble++) {
		func_nibble = dev_part & (0xF << (nibble * 4));
		func_nibble >>= (nibble * 4);
		device_map[i].func_num = i;
		device_map[i].device_type = func_nibble & 0x3;
		device_map[i].port_num = func_nibble & 0xC;

		if (device_map[i].device_type == NIC_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				/* An active NIC always wins; stop scanning */
				nic_present++;
				break;
			}
		} else if (device_map[i].device_type == ISCSI_CLASS) {
			if (drv_active & (1 << device_map[i].func_num)) {
				/* Track the lowest active iSCSI function */
				if (!iscsi_present ||
				    iscsi_func_low > device_map[i].func_num)
					iscsi_func_low = device_map[i].func_num;

				iscsi_present++;
			}
		}

		/* For function_num[8..15] get info from dev_part2 register */
		/* NOTE(review): the for-increment runs after this reset, so
		 * function 8 reads nibble 1 of dev_part2 (not nibble 0) and
		 * nibble*4 can reach 32 at i==15 - confirm intended mapping. */
		if (nibble == 7) {
			nibble = 0;
			dev_part = dev_part2;
		}
	}

	/* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
	 * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
	 * present. */
	if (!nic_present && (ha->func_num == iscsi_func_low)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
				  __func__, ha->func_num));
		return 1;
	}

	return 0;
}
/**
 * qla4_83xx_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Non-owners only ACK the reset (set their rst-ready bit) and wait for the
 * device to leave NEED_RESET state; the reset owner waits for every active
 * function to ACK, prunes non-ACKing functions from DRV_ACTIVE, and then
 * bootstraps the device.
 *
 * Note: IDC lock must be held upon entry
 **/
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;

	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
		   __func__);

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
				  __func__));
		qla4_8xxx_set_rst_ready(ha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner */
		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
					   __func__);
				break;
			}

			/* Drop the IDC lock while sleeping so the reset
			 * owner can make progress */
			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		qla4_8xxx_set_rst_ready(ha);
		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
			   __func__, drv_state, drv_active);

		/* Wait until every active function has ACKed the reset */
		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
					   __func__, DRIVER_NAME, drv_state,
					   drv_active);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			drv_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_STATE);
			drv_active = qla4_8xxx_rd_direct(ha,
							 QLA8XXX_CRB_DRV_ACTIVE);
		}

		/* Functions that never ACKed are dropped from DRV_ACTIVE so
		 * they cannot stall future IDC transitions */
		if (drv_state != drv_active) {
			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
				   __func__, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
					    drv_active);
		}

		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);

		/* Start Reset Recovery */
		qla4_8xxx_device_bootstrap(ha);
	}
}
/*
 * Fetch the IDC timeout parameters from flash, falling back to ROM
 * defaults when the read fails.  idc_params layout: bits 0-15 device-init
 * timeout, bits 16-31 reset timeout (both multiplied by HZ at use sites,
 * i.e. seconds).
 */
void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
{
	uint32_t idc_params, ret_val;

	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
					   (uint8_t *)&idc_params, 1);
	if (ret_val == QLA_SUCCESS) {
		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
	} else {
		/* Flash read failed - use compiled-in defaults */
		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
	}

	DEBUG2(ql4_printk(KERN_DEBUG, ha,
			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
			  __func__, ha->nx_dev_init_timeout,
			  ha->nx_reset_timeout));
}
  501. /*-------------------------Reset Sequence Functions-----------------------*/
/* Debug helper: dump the first 16 bytes of the reset template buffer. */
static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
{
	uint8_t *phdr;

	/* Template may never have been read (allocation/read failure) */
	if (!ha->reset_tmplt.buff) {
		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
			   __func__);
		return;
	}

	phdr = ha->reset_tmplt.buff;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
			  *(phdr+13), *(phdr+14), *(phdr+15)));
}
/*
 * Copy the bootloader image from flash into MS (on-chip) memory.
 *
 * Destination address and size come from the BOOTLOADER_ADDR/SIZE scratch
 * registers; the size is rounded up to a 16-byte multiple because MS
 * writes are performed 128 bits at a time.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
{
	uint8_t *p_cache;
	uint32_t src, count, size;
	uint64_t dest;
	int ret_val = QLA_SUCCESS;

	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);

	/* 128 bit alignment check */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	/* 16 byte count */
	count = size/16;

	p_cache = vmalloc(size);
	if (p_cache == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_copy_bootloader;
	}

	/* Lockless read is safe here: presumably no other function touches
	 * flash during bootstrap - confirm against reset sequence callers */
	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
						    size / sizeof(uint32_t));
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
			   __func__);
		goto exit_copy_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
			  __func__));

	/* 128 bit/16 byte write to MS memory */
	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
					      count);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
			   __func__);
		goto exit_copy_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
			  __func__, size));

exit_copy_error:
	vfree(p_cache);

exit_copy_bootloader:
	return ret_val;
}
  563. static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
  564. {
  565. uint32_t val, ret_val = QLA_ERROR;
  566. int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
  567. do {
  568. val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
  569. if (val == PHAN_INITIALIZE_COMPLETE) {
  570. DEBUG2(ql4_printk(KERN_INFO, ha,
  571. "%s: Command Peg initialization complete. State=0x%x\n",
  572. __func__, val));
  573. ret_val = QLA_SUCCESS;
  574. break;
  575. }
  576. msleep(CRB_CMDPEG_CHECK_DELAY);
  577. } while (--retries);
  578. return ret_val;
  579. }
/**
 * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
 * value read ANDed with test_mask is equal to test_result.
 *
 * @ha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Poll for total of "duration" msecs
 * @test_mask : Mask value read with "test_mask"
 * @test_result : Compare (value&test_mask) with test_result.
 *
 * Returns QLA_SUCCESS if the masked value matched within the window,
 * QLA_ERROR on read failure or timeout (seq_error is bumped either way).
 **/
static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
			      int duration, uint32_t test_mask,
			      uint32_t test_result)
{
	uint32_t value;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
	if (ret_val == QLA_ERROR)
		goto exit_poll_reg;

	/* Poll interval is duration/10, so up to 10 re-reads */
	retries = duration / 10;
	do {
		if ((value & test_mask) != test_result) {
			/* Not there yet: sleep one interval and re-read */
			msleep(duration / 10);
			ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
			if (ret_val == QLA_ERROR)
				goto exit_poll_reg;

			/* Assume failure for now; overwritten with
			 * QLA_SUCCESS on the next pass if the re-read
			 * value matches */
			ret_val = QLA_ERROR;
		} else {
			ret_val = QLA_SUCCESS;
			break;
		}
	} while (retries--);

exit_poll_reg:
	if (ret_val == QLA_ERROR) {
		/* Record the failed step for the reset-sequence driver */
		ha->reset_tmplt.seq_error++;
		ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
			   __func__, value, test_mask, test_result);
	}

	return ret_val;
}
  621. static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
  622. {
  623. uint32_t sum = 0;
  624. uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
  625. int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
  626. int ret_val;
  627. while (u16_count-- > 0)
  628. sum += *buff++;
  629. while (sum >> 16)
  630. sum = (sum & 0xFFFF) + (sum >> 16);
  631. /* checksum of 0 indicates a valid template */
  632. if (~sum) {
  633. ret_val = QLA_SUCCESS;
  634. } else {
  635. ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
  636. __func__);
  637. ret_val = QLA_ERROR;
  638. }
  639. return ret_val;
  640. }
  641. /**
  642. * qla4_83xx_read_reset_template - Read Reset Template from Flash
  643. * @ha: Pointer to adapter structure
  644. **/
  645. void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
  646. {
  647. uint8_t *p_buff;
  648. uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
  649. uint32_t ret_val;
  650. ha->reset_tmplt.seq_error = 0;
  651. ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
  652. if (ha->reset_tmplt.buff == NULL) {
  653. ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
  654. __func__);
  655. goto exit_read_reset_template;
  656. }
  657. p_buff = ha->reset_tmplt.buff;
  658. addr = QLA83XX_RESET_TEMPLATE_ADDR;
  659. tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
  660. sizeof(uint32_t);
  661. DEBUG2(ql4_printk(KERN_INFO, ha,
  662. "%s: Read template hdr size %d from Flash\n",
  663. __func__, tmplt_hdr_def_size));
  664. /* Copy template header from flash */
  665. ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
  666. tmplt_hdr_def_size);
  667. if (ret_val != QLA_SUCCESS) {
  668. ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
  669. __func__);
  670. goto exit_read_template_error;
  671. }
  672. ha->reset_tmplt.hdr =
  673. (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
  674. /* Validate the template header size and signature */
  675. tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
  676. if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
  677. (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
  678. ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
  679. __func__, tmplt_hdr_size, tmplt_hdr_def_size);
  680. goto exit_read_template_error;
  681. }
  682. addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
  683. p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
  684. tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
  685. ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
  686. DEBUG2(ql4_printk(KERN_INFO, ha,
  687. "%s: Read rest of the template size %d\n",
  688. __func__, ha->reset_tmplt.hdr->size));
  689. /* Copy rest of the template */
  690. ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
  691. tmplt_hdr_def_size);
  692. if (ret_val != QLA_SUCCESS) {
  693. ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
  694. __func__);
  695. goto exit_read_template_error;
  696. }
  697. /* Integrity check */
  698. if (qla4_83xx_reset_seq_checksum_test(ha)) {
  699. ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
  700. __func__);
  701. goto exit_read_template_error;
  702. }
  703. DEBUG2(ql4_printk(KERN_INFO, ha,
  704. "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
  705. __func__));
  706. /* Get STOP, START, INIT sequence offsets */
  707. ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
  708. ha->reset_tmplt.hdr->init_seq_offset;
  709. ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
  710. ha->reset_tmplt.hdr->start_seq_offset;
  711. ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
  712. ha->reset_tmplt.hdr->hdr_size;
  713. qla4_83xx_dump_reset_seq_hdr(ha);
  714. goto exit_read_reset_template;
  715. exit_read_template_error:
  716. vfree(ha->reset_tmplt.buff);
  717. exit_read_reset_template:
  718. return;
  719. }
  720. /**
  721. * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
  722. *
  723. * @ha : Pointer to adapter structure
  724. * @raddr : CRB address to read from
  725. * @waddr : CRB address to write to
  726. **/
  727. static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
  728. uint32_t raddr, uint32_t waddr)
  729. {
  730. uint32_t value;
  731. qla4_83xx_rd_reg_indirect(ha, raddr, &value);
  732. qla4_83xx_wr_reg_indirect(ha, waddr, value);
  733. }
  734. /**
  735. * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
  736. *
  737. * This function read value from raddr, AND with test_mask,
  738. * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
  739. *
  740. * @ha : Pointer to adapter structure
  741. * @raddr : CRB address to read from
  742. * @waddr : CRB address to write to
  743. * @p_rmw_hdr : header with shift/or/xor values.
  744. **/
  745. static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
  746. uint32_t waddr,
  747. struct qla4_83xx_rmw *p_rmw_hdr)
  748. {
  749. uint32_t value;
  750. if (p_rmw_hdr->index_a)
  751. value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
  752. else
  753. qla4_83xx_rd_reg_indirect(ha, raddr, &value);
  754. value &= p_rmw_hdr->test_mask;
  755. value <<= p_rmw_hdr->shl;
  756. value >>= p_rmw_hdr->shr;
  757. value |= p_rmw_hdr->or_value;
  758. value ^= p_rmw_hdr->xor_value;
  759. qla4_83xx_wr_reg_indirect(ha, waddr, value);
  760. return;
  761. }
  762. static void qla4_83xx_write_list(struct scsi_qla_host *ha,
  763. struct qla4_83xx_reset_entry_hdr *p_hdr)
  764. {
  765. struct qla4_83xx_entry *p_entry;
  766. uint32_t i;
  767. p_entry = (struct qla4_83xx_entry *)
  768. ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
  769. for (i = 0; i < p_hdr->count; i++, p_entry++) {
  770. qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
  771. if (p_hdr->delay)
  772. udelay((uint32_t)(p_hdr->delay));
  773. }
  774. }
  775. static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
  776. struct qla4_83xx_reset_entry_hdr *p_hdr)
  777. {
  778. struct qla4_83xx_entry *p_entry;
  779. uint32_t i;
  780. p_entry = (struct qla4_83xx_entry *)
  781. ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
  782. for (i = 0; i < p_hdr->count; i++, p_entry++) {
  783. qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
  784. if (p_hdr->delay)
  785. udelay((uint32_t)(p_hdr->delay));
  786. }
  787. }
/* OPCODE_POLL_LIST: poll each register in the entry list against the
 * test_mask/test_value from the poll header.  With a zero delay the poll
 * result is ignored; with a non-zero delay a timed-out poll triggers a
 * read-back of both entry addresses (values discarded). */
static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
				struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	/* Poll header (test_mask/test_value) follows the entry header. */
	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));

	/* Entries start after 8 byte qla4_83xx_poll, poll header contains
	 * the test_mask, test_value. */
	p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
					     sizeof(struct qla4_83xx_poll));

	delay = (long)p_hdr->delay;

	if (!delay) {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					   p_poll->test_mask,
					   p_poll->test_value);
		}
	} else {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				/* Poll timed out: both addresses are read for
				 * their access side effect only — presumably a
				 * diagnostic aid; the values are discarded.
				 * TODO confirm against firmware template spec. */
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
							  &value);
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
							  &value);
			}
		}
	}
}
/* OPCODE_POLL_WRITE_LIST: for each quad entry write the dr and ar
 * registers, then (when a delay is given) poll the ar register using the
 * mask/value from the poll header; a timeout is only logged. */
static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
				      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;

	/* Poll header follows the entry header; quad entries follow it. */
	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
					  p_entry->dr_value);
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				/* Timeout is non-fatal; sequence continues. */
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			}
		}
	}
}
  851. static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
  852. struct qla4_83xx_reset_entry_hdr *p_hdr)
  853. {
  854. struct qla4_83xx_entry *p_entry;
  855. struct qla4_83xx_rmw *p_rmw_hdr;
  856. uint32_t i;
  857. p_rmw_hdr = (struct qla4_83xx_rmw *)
  858. ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
  859. p_entry = (struct qla4_83xx_entry *)
  860. ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
  861. for (i = 0; i < p_hdr->count; i++, p_entry++) {
  862. qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
  863. p_rmw_hdr);
  864. if (p_hdr->delay)
  865. udelay((uint32_t)(p_hdr->delay));
  866. }
  867. }
  868. static void qla4_83xx_pause(struct scsi_qla_host *ha,
  869. struct qla4_83xx_reset_entry_hdr *p_hdr)
  870. {
  871. if (p_hdr->delay)
  872. mdelay((uint32_t)((long)p_hdr->delay));
  873. }
/* OPCODE_POLL_READ_LIST: for each quad entry write the ar register, poll
 * it against the poll header mask/value, and on success read the dr
 * register into the reset template scratch array (consumed later by the
 * READ_MODIFY_WRITE opcode via index_a). */
static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
				     struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	/* Poll header follows the entry header; quad entries follow it. */
	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				/* Timeout is non-fatal; sequence continues. */
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			} else {
				index = ha->reset_tmplt.array_index;
				qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
							  &value);
				ha->reset_tmplt.array[index++] = value;
				/* NOTE(review): array_index is only advanced on
				 * wrap (reset to 1, slot 0 apparently reserved);
				 * otherwise successive reads reuse the same
				 * slot.  Looks deliberate per the template
				 * design, but worth confirming against the
				 * firmware reset-template spec. */
				if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
					ha->reset_tmplt.array_index = 1;
			}
		}
	}
}
/* OPCODE_SEQ_END handler: mark the end of the current sub-sequence so
 * qla4_83xx_process_reset_template() stops iterating. */
static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
			      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	ha->reset_tmplt.seq_end = 1;
}
  915. static void qla4_83xx_template_end(struct scsi_qla_host *ha,
  916. struct qla4_83xx_reset_entry_hdr *p_hdr)
  917. {
  918. ha->reset_tmplt.template_end = 1;
  919. if (ha->reset_tmplt.seq_error == 0) {
  920. DEBUG2(ql4_printk(KERN_INFO, ha,
  921. "%s: Reset sequence completed SUCCESSFULLY.\n",
  922. __func__));
  923. } else {
  924. ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
  925. __func__);
  926. }
  927. }
/**
 * qla4_83xx_process_reset_template - Process reset template.
 *
 * Process all entries in reset template till entry with SEQ_END opcode,
 * which indicates end of the reset template processing. Each entry has a
 * Reset Entry header, entry opcode/command, with size of the entry, number
 * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
 *
 * @ha : Pointer to adapter structure
 * @p_buff : Common reset entry header.
 **/
static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
					     char *p_buff)
{
	int index, entries;
	struct qla4_83xx_reset_entry_hdr *p_hdr;
	char *p_entry = p_buff;

	ha->reset_tmplt.seq_end = 0;
	ha->reset_tmplt.template_end = 0;
	entries = ha->reset_tmplt.hdr->entries;
	/* seq_index persists across calls, so STOP/INIT/START sub-sequences
	 * continue from where the previous one stopped. */
	index = ha->reset_tmplt.seq_index;

	for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
		p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
		switch (p_hdr->cmd) {
		case OPCODE_NOP:
			break;
		case OPCODE_WRITE_LIST:
			qla4_83xx_write_list(ha, p_hdr);
			break;
		case OPCODE_READ_WRITE_LIST:
			qla4_83xx_read_write_list(ha, p_hdr);
			break;
		case OPCODE_POLL_LIST:
			qla4_83xx_poll_list(ha, p_hdr);
			break;
		case OPCODE_POLL_WRITE_LIST:
			qla4_83xx_poll_write_list(ha, p_hdr);
			break;
		case OPCODE_READ_MODIFY_WRITE:
			qla4_83xx_read_modify_write(ha, p_hdr);
			break;
		case OPCODE_SEQ_PAUSE:
			qla4_83xx_pause(ha, p_hdr);
			break;
		case OPCODE_SEQ_END:
			qla4_83xx_seq_end(ha, p_hdr);
			break;
		case OPCODE_TMPL_END:
			qla4_83xx_template_end(ha, p_hdr);
			break;
		case OPCODE_POLL_READ_LIST:
			qla4_83xx_poll_read_list(ha, p_hdr);
			break;
		default:
			/* Unknown opcode: log it and skip ahead by entry size. */
			ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
				   __func__, p_hdr->cmd, index);
			break;
		}
		/* Set pointer to next entry in the sequence. */
		p_entry += p_hdr->size;
	}

	/* Remember where this sub-sequence stopped for the next one. */
	ha->reset_tmplt.seq_index = index;
}
  991. static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
  992. {
  993. ha->reset_tmplt.seq_index = 0;
  994. qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
  995. if (ha->reset_tmplt.seq_end != 1)
  996. ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
  997. __func__);
  998. }
  999. static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
  1000. {
  1001. qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
  1002. if (ha->reset_tmplt.template_end != 1)
  1003. ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
  1004. __func__);
  1005. }
  1006. static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
  1007. {
  1008. qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
  1009. if (ha->reset_tmplt.seq_end != 1)
  1010. ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
  1011. __func__);
  1012. }
/* Full firmware restart flow: STOP sequence, optional minidump capture,
 * INIT sequence, bootloader copy, then START sequence.
 * Returns QLA_SUCCESS, or QLA_ERROR if the bootloader copy fails. */
static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;
	uint32_t idc_ctrl;

	qla4_83xx_process_stop_seq(ha);

	/*
	 * Collect minidump.
	 * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
	 * don't collect minidump
	 */
	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
				 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
			   __func__);
	} else {
		qla4_8xxx_get_minidump(ha);
	}

	qla4_83xx_process_init_seq(ha);

	if (qla4_83xx_copy_bootloader(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_restart;
	}

	/* Tell firmware the image is valid in flash before starting it. */
	qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
	qla4_83xx_process_start_seq(ha);

exit_restart:
	return ret_val;
}
  1044. int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
  1045. {
  1046. int ret_val = QLA_SUCCESS;
  1047. ret_val = qla4_83xx_restart(ha);
  1048. if (ret_val == QLA_ERROR) {
  1049. ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
  1050. goto exit_start_fw;
  1051. } else {
  1052. DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
  1053. __func__));
  1054. }
  1055. ret_val = qla4_83xx_check_cmd_peg_status(ha);
  1056. if (ret_val == QLA_ERROR)
  1057. ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
  1058. __func__);
  1059. exit_start_fw:
  1060. return ret_val;
  1061. }
  1062. /*----------------------Interrupt Related functions ---------------------*/
  1063. static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
  1064. {
  1065. if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
  1066. qla4_8xxx_intr_disable(ha);
  1067. }
  1068. static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
  1069. {
  1070. uint32_t mb_int, ret;
  1071. if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
  1072. ret = readl(&ha->qla4_83xx_reg->mbox_int);
  1073. mb_int = ret & ~INT_ENABLE_FW_MB;
  1074. writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
  1075. writel(1, &ha->qla4_83xx_reg->leg_int_mask);
  1076. }
  1077. }
/* Disable all 83xx interrupt sources: mailbox first, then IOCB. */
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_disable_mbox_intrs(ha);
	qla4_83xx_disable_iocb_intrs(ha);
}
  1083. static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
  1084. {
  1085. if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
  1086. qla4_8xxx_intr_enable(ha);
  1087. set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
  1088. }
  1089. }
  1090. void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
  1091. {
  1092. uint32_t mb_int;
  1093. if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
  1094. mb_int = INT_ENABLE_FW_MB;
  1095. writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
  1096. writel(0, &ha->qla4_83xx_reg->leg_int_mask);
  1097. set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
  1098. }
  1099. }
/* Enable all 83xx interrupt sources: mailbox first, then IOCB. */
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_enable_mbox_intrs(ha);
	qla4_83xx_enable_iocb_intrs(ha);
}
/* Load a mailbox command into the 83xx mailbox-in registers.  Mailbox 0
 * (the command register) is written last, then the host interrupt
 * register signals firmware that a command is pending. */
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int incount)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < incount; i++)
		writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);

	writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);

	/* Set Host Interrupt register to 1, to tell the firmware that
	 * a mailbox command is pending. Firmware after reading the
	 * mailbox command, clears the host interrupt register */
	writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
}
/* Check the RISC interrupt status; if an interrupt is pending, record the
 * expected mailbox-out count and run the ISR to harvest the completion. */
void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
{
	int intr_status;

	intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
	if (intr_status) {
		ha->mbox_status_count = outcount;
		ha->isp_ops->interrupt_service_routine(ha, intr_status);
	}
}
/**
 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
 * @ha: pointer to host adapter structure.
 *
 * Drives the IDC (Inter-Driver Coordination) state machine: moves the
 * device to NEED_RESET (honoring the don't-reset controls), claims reset
 * ownership where permitted, and runs the device state handler.
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
{
	int rval;
	uint32_t dev_state;

	ha->isp_ops->idc_lock(ha);
	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);

	/* Module parameter: advertise that this host must not be reset. */
	if (ql4xdontresethba)
		qla4_83xx_set_idc_dontreset(ha);

	if (dev_state == QLA8XXX_DEV_READY) {
		/* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
		 * recovery */
		if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
			ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
				   __func__);
			rval = QLA_ERROR;
			goto exit_isp_reset;
		}

		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
				  __func__));
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_NEED_RESET);
	} else {
		/* If device_state is NEED_RESET, go ahead with
		 * Reset,irrespective of ql4xdontresethba. This is to allow a
		 * non-reset-owner to force a reset. Non-reset-owner sets
		 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
		 * and then forces a Reset by setting device_state to
		 * NEED_RESET. */
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: HW state already set to NEED_RESET\n",
				  __func__));
	}

	/* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
	 * priority and which drivers are present. Unlike ISP8022, the function
	 * setting NEED_RESET, may not be the Reset owner. */
	if (qla4_83xx_can_perform_reset(ha))
		set_bit(AF_8XXX_RST_OWNER, &ha->flags);

	/* Drop the IDC lock around the (potentially long) state handler. */
	ha->isp_ops->idc_unlock(ha);
	rval = qla4_8xxx_device_state_handler(ha);

	ha->isp_ops->idc_lock(ha);
	qla4_8xxx_clear_rst_ready(ha);
exit_isp_reset:
	ha->isp_ops->idc_unlock(ha);

	if (rval == QLA_SUCCESS)
		clear_bit(AF_FW_RECOVERY, &ha->flags);

	return rval;
}
/* Dump (via DEBUG2) the pause-control related registers for all ports:
 * SRE-Shim control, Rx buffer pause thresholds, traffic-class max cell
 * counts, per-TC Rx stats and the IFB pause thresholds.  Read-mostly;
 * the only writes select which TC the stats registers report. */
static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
{
	u32 val = 0, val1 = 0;
	int i;

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));

	/* Port 0 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 1 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
			       QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
			       QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		/* Bits 31:29 select which traffic class the stats register
		 * reports: clear them, write the TC index, then read back. */
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x ", val));
	}
	DEBUG2(pr_info("\n"));

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val);
	qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1);
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
			  val, val1));
}
  1250. static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
  1251. {
  1252. int i;
  1253. /* set SRE-Shim Control Register */
  1254. qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
  1255. QLA83XX_SET_PAUSE_VAL);
  1256. for (i = 0; i < 8; i++) {
  1257. /* Port 0 Rx Buffer Pause Threshold Registers. */
  1258. qla4_83xx_wr_reg_indirect(ha,
  1259. QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
  1260. QLA83XX_SET_PAUSE_VAL);
  1261. /* Port 1 Rx Buffer Pause Threshold Registers. */
  1262. qla4_83xx_wr_reg_indirect(ha,
  1263. QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
  1264. QLA83XX_SET_PAUSE_VAL);
  1265. }
  1266. for (i = 0; i < 4; i++) {
  1267. /* Port 0 RxB Traffic Class Max Cell Registers. */
  1268. qla4_83xx_wr_reg_indirect(ha,
  1269. QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
  1270. QLA83XX_SET_TC_MAX_CELL_VAL);
  1271. /* Port 1 RxB Traffic Class Max Cell Registers. */
  1272. qla4_83xx_wr_reg_indirect(ha,
  1273. QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
  1274. QLA83XX_SET_TC_MAX_CELL_VAL);
  1275. }
  1276. qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
  1277. QLA83XX_SET_PAUSE_VAL);
  1278. qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
  1279. QLA83XX_SET_PAUSE_VAL);
  1280. ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
  1281. }
/**
 * qla4_83xx_eport_init - Initialize EPort.
 * @ha: Pointer to host adapter structure.
 *
 * If EPort hardware is in reset state before disabling pause, there would be
 * serious hardware wedging issues. To prevent this perform eport init everytime
 * before disabling pause frames.
 **/
static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
{
	/* Clear the 8 registers */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);

	/* Write any value to Reset Control register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);

	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
}
/* Disable Ethernet pause frames under the IDC lock: bring the EPort out
 * of reset first (see qla4_83xx_eport_init), dump the current pause
 * control registers for diagnostics, then program the disable values. */
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);
	/* Before disabling pause frames, ensure that eport is not in reset */
	qla4_83xx_eport_init(ha);
	qla4_83xx_dump_pause_control_regs(ha);
	__qla4_83xx_disable_pause(ha);
	ha->isp_ops->idc_unlock(ha);
}
  1314. /**
  1315. * qla4_83xx_is_detached - Check if we are marked invisible.
  1316. * @ha: Pointer to host adapter structure.
  1317. **/
  1318. int qla4_83xx_is_detached(struct scsi_qla_host *ha)
  1319. {
  1320. uint32_t drv_active;
  1321. drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
  1322. if (test_bit(AF_INIT_DONE, &ha->flags) &&
  1323. !(drv_active & (1 << ha->func_num))) {
  1324. DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
  1325. __func__, drv_active));
  1326. return QLA_SUCCESS;
  1327. }
  1328. return QLA_ERROR;
  1329. }