esas2r_init.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699
  1. /*
  2. * linux/drivers/scsi/esas2r/esas2r_init.c
  3. * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
  4. *
  5. * Copyright (c) 2001-2013 ATTO Technology, Inc.
  6. * (mailto:linux@attotech.com)
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version 2
  11. * of the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * NO WARRANTY
  19. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  20. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  21. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  22. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  23. * solely responsible for determining the appropriateness of using and
  24. * distributing the Program and assumes all risks associated with its
  25. * exercise of rights under this Agreement, including but not limited to
  26. * the risks and costs of program errors, damage to or loss of data,
  27. * programs or equipment, and unavailability or interruption of operations.
  28. *
  29. * DISCLAIMER OF LIABILITY
  30. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  31. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  32. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  33. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  34. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  35. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  36. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  37. *
  38. * You should have received a copy of the GNU General Public License
  39. * along with this program; if not, write to the Free Software
  40. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  41. * USA.
  42. */
#include "esas2r.h"
  44. static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
  45. struct esas2r_mem_desc *mem_desc,
  46. u32 align)
  47. {
  48. mem_desc->esas2r_param = mem_desc->size + align;
  49. mem_desc->virt_addr = NULL;
  50. mem_desc->phys_addr = 0;
  51. mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
  52. (size_t)mem_desc->
  53. esas2r_param,
  54. (dma_addr_t *)&mem_desc->
  55. phys_addr,
  56. GFP_KERNEL);
  57. if (mem_desc->esas2r_data == NULL) {
  58. esas2r_log(ESAS2R_LOG_CRIT,
  59. "failed to allocate %lu bytes of consistent memory!",
  60. (long
  61. unsigned
  62. int)mem_desc->esas2r_param);
  63. return false;
  64. }
  65. mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
  66. mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
  67. memset(mem_desc->virt_addr, 0, mem_desc->size);
  68. return true;
  69. }
/*
 * Release memory obtained from esas2r_initmem_alloc().  Safe to call on a
 * descriptor whose allocation never succeeded (virt_addr == NULL is a
 * no-op), so teardown paths can free unconditionally.
 */
static void esas2r_initmem_free(struct esas2r_adapter *a,
				struct esas2r_mem_desc *mem_desc)
{
	if (mem_desc->virt_addr == NULL)
		return;

	/*
	 * Careful! phys_addr and virt_addr may have been adjusted from the
	 * original allocation in order to return the desired alignment. That
	 * means we have to use the original address (in esas2r_data) and size
	 * (esas2r_param) and calculate the original physical address based on
	 * the difference between the requested and actual allocation size.
	 */
	if (mem_desc->phys_addr) {
		/* bytes the aligned virtual pointer was advanced past the raw one */
		int unalign = ((u8 *)mem_desc->virt_addr) -
			      ((u8 *)mem_desc->esas2r_data);

		dma_free_coherent(&a->pcid->dev,
				  (size_t)mem_desc->esas2r_param,
				  mem_desc->esas2r_data,
				  (dma_addr_t)(mem_desc->phys_addr - unalign));
	} else {
		/* no bus address: buffer was not DMA-mapped, release with kfree() */
		kfree(mem_desc->esas2r_data);
	}

	mem_desc->virt_addr = NULL;
}
  94. static bool alloc_vda_req(struct esas2r_adapter *a,
  95. struct esas2r_request *rq)
  96. {
  97. struct esas2r_mem_desc *memdesc = kzalloc(
  98. sizeof(struct esas2r_mem_desc), GFP_KERNEL);
  99. if (memdesc == NULL) {
  100. esas2r_hdebug("could not alloc mem for vda request memdesc\n");
  101. return false;
  102. }
  103. memdesc->size = sizeof(union atto_vda_req) +
  104. ESAS2R_DATA_BUF_LEN;
  105. if (!esas2r_initmem_alloc(a, memdesc, 256)) {
  106. esas2r_hdebug("could not alloc mem for vda request\n");
  107. kfree(memdesc);
  108. return false;
  109. }
  110. a->num_vrqs++;
  111. list_add(&memdesc->next_desc, &a->vrq_mds_head);
  112. rq->vrq_md = memdesc;
  113. rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
  114. rq->vrq->scsi.handle = a->num_vrqs;
  115. return true;
  116. }
  117. static void esas2r_unmap_regions(struct esas2r_adapter *a)
  118. {
  119. if (a->regs)
  120. iounmap((void __iomem *)a->regs);
  121. a->regs = NULL;
  122. pci_release_region(a->pcid, 2);
  123. if (a->data_window)
  124. iounmap((void __iomem *)a->data_window);
  125. a->data_window = NULL;
  126. pci_release_region(a->pcid, 0);
  127. }
  128. static int esas2r_map_regions(struct esas2r_adapter *a)
  129. {
  130. int error;
  131. a->regs = NULL;
  132. a->data_window = NULL;
  133. error = pci_request_region(a->pcid, 2, a->name);
  134. if (error != 0) {
  135. esas2r_log(ESAS2R_LOG_CRIT,
  136. "pci_request_region(2) failed, error %d",
  137. error);
  138. return error;
  139. }
  140. a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
  141. pci_resource_len(a->pcid, 2));
  142. if (a->regs == NULL) {
  143. esas2r_log(ESAS2R_LOG_CRIT,
  144. "ioremap failed for regs mem region\n");
  145. pci_release_region(a->pcid, 2);
  146. return -EFAULT;
  147. }
  148. error = pci_request_region(a->pcid, 0, a->name);
  149. if (error != 0) {
  150. esas2r_log(ESAS2R_LOG_CRIT,
  151. "pci_request_region(2) failed, error %d",
  152. error);
  153. esas2r_unmap_regions(a);
  154. return error;
  155. }
  156. a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
  157. 0),
  158. pci_resource_len(a->pcid, 0));
  159. if (a->data_window == NULL) {
  160. esas2r_log(ESAS2R_LOG_CRIT,
  161. "ioremap failed for data_window mem region\n");
  162. esas2r_unmap_regions(a);
  163. return -EFAULT;
  164. }
  165. return 0;
  166. }
  167. static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
  168. {
  169. int i;
  170. /* Set up interrupt mode based on the requested value */
  171. switch (intr_mode) {
  172. case INTR_MODE_LEGACY:
  173. use_legacy_interrupts:
  174. a->intr_mode = INTR_MODE_LEGACY;
  175. break;
  176. case INTR_MODE_MSI:
  177. i = pci_enable_msi(a->pcid);
  178. if (i != 0) {
  179. esas2r_log(ESAS2R_LOG_WARN,
  180. "failed to enable MSI for adapter %d, "
  181. "falling back to legacy interrupts "
  182. "(err=%d)", a->index,
  183. i);
  184. goto use_legacy_interrupts;
  185. }
  186. a->intr_mode = INTR_MODE_MSI;
  187. set_bit(AF2_MSI_ENABLED, &a->flags2);
  188. break;
  189. default:
  190. esas2r_log(ESAS2R_LOG_WARN,
  191. "unknown interrupt_mode %d requested, "
  192. "falling back to legacy interrupt",
  193. interrupt_mode);
  194. goto use_legacy_interrupts;
  195. }
  196. }
  197. static void esas2r_claim_interrupts(struct esas2r_adapter *a)
  198. {
  199. unsigned long flags = 0;
  200. if (a->intr_mode == INTR_MODE_LEGACY)
  201. flags |= IRQF_SHARED;
  202. esas2r_log(ESAS2R_LOG_INFO,
  203. "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
  204. a->pcid->irq, a, a->name, flags);
  205. if (request_irq(a->pcid->irq,
  206. (a->intr_mode ==
  207. INTR_MODE_LEGACY) ? esas2r_interrupt :
  208. esas2r_msi_interrupt,
  209. flags,
  210. a->name,
  211. a)) {
  212. esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
  213. a->pcid->irq);
  214. return;
  215. }
  216. set_bit(AF2_IRQ_CLAIMED, &a->flags2);
  217. esas2r_log(ESAS2R_LOG_INFO,
  218. "claimed IRQ %d flags: 0x%lx",
  219. a->pcid->irq, flags);
  220. }
  221. int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
  222. int index)
  223. {
  224. struct esas2r_adapter *a;
  225. u64 bus_addr = 0;
  226. int i;
  227. void *next_uncached;
  228. struct esas2r_request *first_request, *last_request;
  229. bool dma64 = false;
  230. if (index >= MAX_ADAPTERS) {
  231. esas2r_log(ESAS2R_LOG_CRIT,
  232. "tried to init invalid adapter index %u!",
  233. index);
  234. return 0;
  235. }
  236. if (esas2r_adapters[index]) {
  237. esas2r_log(ESAS2R_LOG_CRIT,
  238. "tried to init existing adapter index %u!",
  239. index);
  240. return 0;
  241. }
  242. a = (struct esas2r_adapter *)host->hostdata;
  243. memset(a, 0, sizeof(struct esas2r_adapter));
  244. a->pcid = pcid;
  245. a->host = host;
  246. if (sizeof(dma_addr_t) > 4 &&
  247. dma_get_required_mask(&pcid->dev) > DMA_BIT_MASK(32) &&
  248. !dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(64)))
  249. dma64 = true;
  250. if (!dma64 && dma_set_mask_and_coherent(&pcid->dev, DMA_BIT_MASK(32))) {
  251. esas2r_log(ESAS2R_LOG_CRIT, "failed to set DMA mask");
  252. esas2r_kill_adapter(index);
  253. return 0;
  254. }
  255. esas2r_log_dev(ESAS2R_LOG_INFO, &pcid->dev,
  256. "%s-bit PCI addressing enabled\n", dma64 ? "64" : "32");
  257. esas2r_adapters[index] = a;
  258. sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
  259. esas2r_debug("new adapter %p, name %s", a, a->name);
  260. spin_lock_init(&a->request_lock);
  261. spin_lock_init(&a->fw_event_lock);
  262. mutex_init(&a->fm_api_mutex);
  263. mutex_init(&a->fs_api_mutex);
  264. sema_init(&a->nvram_semaphore, 1);
  265. esas2r_fw_event_off(a);
  266. snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
  267. a->index);
  268. a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
  269. init_waitqueue_head(&a->buffered_ioctl_waiter);
  270. init_waitqueue_head(&a->nvram_waiter);
  271. init_waitqueue_head(&a->fm_api_waiter);
  272. init_waitqueue_head(&a->fs_api_waiter);
  273. init_waitqueue_head(&a->vda_waiter);
  274. INIT_LIST_HEAD(&a->general_req.req_list);
  275. INIT_LIST_HEAD(&a->active_list);
  276. INIT_LIST_HEAD(&a->defer_list);
  277. INIT_LIST_HEAD(&a->free_sg_list_head);
  278. INIT_LIST_HEAD(&a->avail_request);
  279. INIT_LIST_HEAD(&a->vrq_mds_head);
  280. INIT_LIST_HEAD(&a->fw_event_list);
  281. first_request = (struct esas2r_request *)((u8 *)(a + 1));
  282. for (last_request = first_request, i = 1; i < num_requests;
  283. last_request++, i++) {
  284. INIT_LIST_HEAD(&last_request->req_list);
  285. list_add_tail(&last_request->comp_list, &a->avail_request);
  286. if (!alloc_vda_req(a, last_request)) {
  287. esas2r_log(ESAS2R_LOG_CRIT,
  288. "failed to allocate a VDA request!");
  289. esas2r_kill_adapter(index);
  290. return 0;
  291. }
  292. }
  293. esas2r_debug("requests: %p to %p (%d, %d)", first_request,
  294. last_request,
  295. sizeof(*first_request),
  296. num_requests);
  297. if (esas2r_map_regions(a) != 0) {
  298. esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
  299. esas2r_kill_adapter(index);
  300. return 0;
  301. }
  302. a->index = index;
  303. /* interrupts will be disabled until we are done with init */
  304. atomic_inc(&a->dis_ints_cnt);
  305. atomic_inc(&a->disable_cnt);
  306. set_bit(AF_CHPRST_PENDING, &a->flags);
  307. set_bit(AF_DISC_PENDING, &a->flags);
  308. set_bit(AF_FIRST_INIT, &a->flags);
  309. set_bit(AF_LEGACY_SGE_MODE, &a->flags);
  310. a->init_msg = ESAS2R_INIT_MSG_START;
  311. a->max_vdareq_size = 128;
  312. a->build_sgl = esas2r_build_sg_list_sge;
  313. esas2r_setup_interrupts(a, interrupt_mode);
  314. a->uncached_size = esas2r_get_uncached_size(a);
  315. a->uncached = dma_alloc_coherent(&pcid->dev,
  316. (size_t)a->uncached_size,
  317. (dma_addr_t *)&bus_addr,
  318. GFP_KERNEL);
  319. if (a->uncached == NULL) {
  320. esas2r_log(ESAS2R_LOG_CRIT,
  321. "failed to allocate %d bytes of consistent memory!",
  322. a->uncached_size);
  323. esas2r_kill_adapter(index);
  324. return 0;
  325. }
  326. a->uncached_phys = bus_addr;
  327. esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
  328. a->uncached_size,
  329. a->uncached,
  330. upper_32_bits(bus_addr),
  331. lower_32_bits(bus_addr));
  332. memset(a->uncached, 0, a->uncached_size);
  333. next_uncached = a->uncached;
  334. if (!esas2r_init_adapter_struct(a,
  335. &next_uncached)) {
  336. esas2r_log(ESAS2R_LOG_CRIT,
  337. "failed to initialize adapter structure (2)!");
  338. esas2r_kill_adapter(index);
  339. return 0;
  340. }
  341. tasklet_init(&a->tasklet,
  342. esas2r_adapter_tasklet,
  343. (unsigned long)a);
  344. /*
  345. * Disable chip interrupts to prevent spurious interrupts
  346. * until we claim the IRQ.
  347. */
  348. esas2r_disable_chip_interrupts(a);
  349. esas2r_check_adapter(a);
  350. if (!esas2r_init_adapter_hw(a, true)) {
  351. esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
  352. } else {
  353. esas2r_debug("esas2r_init_adapter ok");
  354. }
  355. esas2r_claim_interrupts(a);
  356. if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
  357. esas2r_enable_chip_interrupts(a);
  358. set_bit(AF2_INIT_DONE, &a->flags2);
  359. if (!test_bit(AF_DEGRADED_MODE, &a->flags))
  360. esas2r_kickoff_timer(a);
  361. esas2r_debug("esas2r_init_adapter done for %p (%d)",
  362. a, a->disable_cnt);
  363. return 1;
  364. }
/*
 * Quiesce the adapter and release its run-time resources.  Used both for
 * full teardown (power_management == 0, stops the timer and tasklet) and
 * for PM suspend (power_management != 0, leaves them alive for resume).
 * Safe on a partially initialized adapter: every release below is guarded
 * by the corresponding "was created" flag or pointer.
 */
static void esas2r_adapter_power_down(struct esas2r_adapter *a,
				      int power_management)
{
	struct esas2r_mem_desc *memdesc, *next;

	/* only halt the chip if init completed and we are not degraded */
	if ((test_bit(AF2_INIT_DONE, &a->flags2))
	    && (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
		if (!power_management) {
			del_timer_sync(&a->timer);
			tasklet_kill(&a->tasklet);
		}
		esas2r_power_down(a);

		/*
		 * There are versions of firmware that do not handle the sync
		 * cache command correctly. Stall here to ensure that the
		 * cache is lazily flushed.
		 */
		mdelay(500);
		esas2r_debug("chip halted");
	}

	/* Remove sysfs binary files */
	if (a->sysfs_fw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
		a->sysfs_fw_created = 0;
	}

	if (a->sysfs_fs_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
		a->sysfs_fs_created = 0;
	}

	if (a->sysfs_vda_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
		a->sysfs_vda_created = 0;
	}

	if (a->sysfs_hw_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
		a->sysfs_hw_created = 0;
	}

	if (a->sysfs_live_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_live_nvram);
		a->sysfs_live_nvram_created = 0;
	}

	if (a->sysfs_default_nvram_created) {
		sysfs_remove_bin_file(&a->host->shost_dev.kobj,
				      &bin_attr_default_nvram);
		a->sysfs_default_nvram_created = 0;
	}

	/* Clean up interrupts */
	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "free_irq(%d) called", a->pcid->irq);

		free_irq(a->pcid->irq, a);
		esas2r_debug("IRQ released");
		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
	}

	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
		pci_disable_msi(a->pcid);
		clear_bit(AF2_MSI_ENABLED, &a->flags2);
		esas2r_debug("MSI disabled");
	}

	/* DMA-coherent list memory (no-ops if never allocated) */
	if (a->inbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->inbound_list_md);

	if (a->outbound_list_md.virt_addr)
		esas2r_initmem_free(a, &a->outbound_list_md);

	/*
	 * S/G descriptors live in the sg_list_mds array (kfree'd below), so
	 * only their DMA buffers are released here - no list_del/kfree.
	 */
	list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
				 next_desc) {
		esas2r_initmem_free(a, memdesc);
	}

	/* Following frees everything allocated via alloc_vda_req */
	list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
		esas2r_initmem_free(a, memdesc);
		list_del(&memdesc->next_desc);
		kfree(memdesc);
	}

	kfree(a->first_ae_req);
	a->first_ae_req = NULL;

	kfree(a->sg_list_mds);
	a->sg_list_mds = NULL;

	kfree(a->req_table);
	a->req_table = NULL;

	if (a->regs) {
		esas2r_unmap_regions(a);
		a->regs = NULL;
		a->data_window = NULL;
		esas2r_debug("regions unmapped");
	}
}
/*
 * Release/free allocated resources for specified adapters.
 *
 * Fully tears down esas2r_adapters[i]: powers the chip down, frees DMA
 * buffers and the firmware event workqueue, disables the PCI device and,
 * if initialization completed, removes and releases the SCSI host.  A
 * NULL slot is a no-op, so this is safe to call from error paths during
 * esas2r_init_adapter() as well.
 */
void esas2r_kill_adapter(int i)
{
	struct esas2r_adapter *a = esas2r_adapters[i];

	if (a) {
		unsigned long flags;
		struct workqueue_struct *wq;
		esas2r_debug("killing adapter %p [%d] ", a, i);
		esas2r_fw_event_off(a);
		esas2r_adapter_power_down(a, 0);

		/* the buffered-ioctl area is shared; free only if we own it */
		if (esas2r_buffered_ioctl &&
		    (a->pcid == esas2r_buffered_ioctl_pcid)) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);
			esas2r_buffered_ioctl = NULL;
		}

		if (a->vda_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)VDA_MAX_BUFFER_SIZE,
					  a->vda_buffer,
					  (dma_addr_t)a->ppvda_buffer);
			a->vda_buffer = NULL;
		}
		if (a->fs_api_buffer) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->fs_api_buffer_size,
					  a->fs_api_buffer,
					  (dma_addr_t)a->ppfs_api_buffer);
			a->fs_api_buffer = NULL;
		}

		kfree(a->local_atto_ioctl);
		a->local_atto_ioctl = NULL;

		/* detach the workqueue under the lock; destroy it outside */
		spin_lock_irqsave(&a->fw_event_lock, flags);
		wq = a->fw_event_q;
		a->fw_event_q = NULL;
		spin_unlock_irqrestore(&a->fw_event_lock, flags);
		if (wq)
			destroy_workqueue(wq);

		if (a->uncached) {
			dma_free_coherent(&a->pcid->dev,
					  (size_t)a->uncached_size,
					  a->uncached,
					  (dma_addr_t)a->uncached_phys);
			a->uncached = NULL;
			esas2r_debug("uncached area freed");
		}

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_disable_device() called. msix_enabled: %d "
			       "msi_enabled: %d irq: %d pin: %d",
			       a->pcid->msix_enabled,
			       a->pcid->msi_enabled,
			       a->pcid->irq,
			       a->pcid->pin);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "before pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);
		pci_disable_device(a->pcid);
		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "after pci_disable_device() enable_cnt: %d",
			       a->pcid->enable_cnt.counter);

		esas2r_log_dev(ESAS2R_LOG_INFO,
			       &(a->pcid->dev),
			       "pci_set_drv_data(%p, NULL) called",
			       a->pcid);
		pci_set_drvdata(a->pcid, NULL);
		esas2r_adapters[i] = NULL;

		/* only remove the SCSI host if init finished adding it */
		if (test_bit(AF2_INIT_DONE, &a->flags2)) {
			clear_bit(AF2_INIT_DONE, &a->flags2);

			set_bit(AF_DEGRADED_MODE, &a->flags);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_remove_host() called");
			scsi_remove_host(a->host);

			esas2r_log_dev(ESAS2R_LOG_INFO,
				       &(a->host->shost_gendev),
				       "scsi_host_put() called");
			scsi_host_put(a->host);
		}
	}
}
  537. static int __maybe_unused esas2r_suspend(struct device *dev)
  538. {
  539. struct Scsi_Host *host = dev_get_drvdata(dev);
  540. struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
  541. esas2r_log_dev(ESAS2R_LOG_INFO, dev, "suspending adapter()");
  542. if (!a)
  543. return -ENODEV;
  544. esas2r_adapter_power_down(a, 1);
  545. esas2r_log_dev(ESAS2R_LOG_INFO, dev, "esas2r_suspend(): 0");
  546. return 0;
  547. }
/*
 * PM resume hook: re-map the PCI regions, restore the interrupt mode that
 * was chosen at probe time, power the chip back up and re-claim the IRQ.
 * Note the success path also flows through error_exit with rez == 0, so
 * the final log line is emitted either way.
 */
static int __maybe_unused esas2r_resume(struct device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(dev);
	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
	int rez = 0;

	esas2r_log_dev(ESAS2R_LOG_INFO, dev, "resuming adapter()");

	if (!a) {
		rez = -ENODEV;
		goto error_exit;
	}

	if (esas2r_map_regions(a) != 0) {
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
		rez = -ENOMEM;
		goto error_exit;
	}

	/* Set up interrupt mode (same mode as before suspend) */
	esas2r_setup_interrupts(a, a->intr_mode);

	/*
	 * Disable chip interrupts to prevent spurious interrupts until we
	 * claim the IRQ.
	 */
	esas2r_disable_chip_interrupts(a);
	if (!esas2r_power_up(a, true)) {
		esas2r_debug("yikes, esas2r_power_up failed");
		rez = -ENOMEM;
		goto error_exit;
	}

	esas2r_claim_interrupts(a);

	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
		/*
		 * Now that system interrupt(s) are claimed, we can enable
		 * chip interrupts.
		 */
		esas2r_enable_chip_interrupts(a);
		esas2r_kickoff_timer(a);
	} else {
		esas2r_debug("yikes, unable to claim IRQ");
		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
		rez = -ENOMEM;
		goto error_exit;
	}

error_exit:
	esas2r_log_dev(ESAS2R_LOG_CRIT, dev, "esas2r_resume(): %d",
		       rez);
	return rez;
}
/* Power-management callbacks: system suspend/resume only (no runtime PM). */
SIMPLE_DEV_PM_OPS(esas2r_pm_ops, esas2r_suspend, esas2r_resume);
/*
 * Flag the adapter as degraded and log the reason.  Always returns false
 * so callers can propagate a failure result directly, e.g.
 * "return esas2r_set_degraded_mode(a, ...);".
 */
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{
	set_bit(AF_DEGRADED_MODE, &a->flags);
	esas2r_log(ESAS2R_LOG_CRIT,
		   "setting adapter to degraded mode: %s\n", error_str);
	return false;
}
  602. u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
  603. {
  604. return sizeof(struct esas2r_sas_nvram)
  605. + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
  606. + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
  607. + 8
  608. + (num_sg_lists * (u16)sgl_page_size)
  609. + ALIGN((num_requests + num_ae_requests + 1 +
  610. ESAS2R_LIST_EXTRA) *
  611. sizeof(struct esas2r_inbound_list_source_entry),
  612. 8)
  613. + ALIGN((num_requests + num_ae_requests + 1 +
  614. ESAS2R_LIST_EXTRA) *
  615. sizeof(struct atto_vda_ob_rsp), 8)
  616. + 256; /* VDA request and buffer align */
  617. }
  618. static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
  619. {
  620. if (pci_is_pcie(a->pcid)) {
  621. u16 devcontrol;
  622. pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
  623. if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
  624. PCI_EXP_DEVCTL_READRQ_512B) {
  625. esas2r_log(ESAS2R_LOG_INFO,
  626. "max read request size > 512B");
  627. devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
  628. devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
  629. pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
  630. devcontrol);
  631. }
  632. }
  633. }
  634. /*
  635. * Determine the organization of the uncached data area and
  636. * finish initializing the adapter structure
  637. */
  638. bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
  639. void **uncached_area)
  640. {
  641. u32 i;
  642. u8 *high;
  643. struct esas2r_inbound_list_source_entry *element;
  644. struct esas2r_request *rq;
  645. struct esas2r_mem_desc *sgl;
  646. spin_lock_init(&a->sg_list_lock);
  647. spin_lock_init(&a->mem_lock);
  648. spin_lock_init(&a->queue_lock);
  649. a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
  650. if (!alloc_vda_req(a, &a->general_req)) {
  651. esas2r_hdebug(
  652. "failed to allocate a VDA request for the general req!");
  653. return false;
  654. }
  655. /* allocate requests for asynchronous events */
  656. a->first_ae_req =
  657. kcalloc(num_ae_requests, sizeof(struct esas2r_request),
  658. GFP_KERNEL);
  659. if (a->first_ae_req == NULL) {
  660. esas2r_log(ESAS2R_LOG_CRIT,
  661. "failed to allocate memory for asynchronous events");
  662. return false;
  663. }
  664. /* allocate the S/G list memory descriptors */
  665. a->sg_list_mds = kcalloc(num_sg_lists, sizeof(struct esas2r_mem_desc),
  666. GFP_KERNEL);
  667. if (a->sg_list_mds == NULL) {
  668. esas2r_log(ESAS2R_LOG_CRIT,
  669. "failed to allocate memory for s/g list descriptors");
  670. return false;
  671. }
  672. /* allocate the request table */
  673. a->req_table =
  674. kcalloc(num_requests + num_ae_requests + 1,
  675. sizeof(struct esas2r_request *),
  676. GFP_KERNEL);
  677. if (a->req_table == NULL) {
  678. esas2r_log(ESAS2R_LOG_CRIT,
  679. "failed to allocate memory for the request table");
  680. return false;
  681. }
  682. /* initialize PCI configuration space */
  683. esas2r_init_pci_cfg_space(a);
  684. /*
  685. * the thunder_stream boards all have a serial flash part that has a
  686. * different base address on the AHB bus.
  687. */
  688. if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
  689. && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
  690. a->flags2 |= AF2_THUNDERBOLT;
  691. if (test_bit(AF2_THUNDERBOLT, &a->flags2))
  692. a->flags2 |= AF2_SERIAL_FLASH;
  693. if (a->pcid->subsystem_device == ATTO_TLSH_1068)
  694. a->flags2 |= AF2_THUNDERLINK;
  695. /* Uncached Area */
  696. high = (u8 *)*uncached_area;
  697. /* initialize the scatter/gather table pages */
  698. for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
  699. sgl->size = sgl_page_size;
  700. list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
  701. if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
  702. /* Allow the driver to load if the minimum count met. */
  703. if (i < NUM_SGL_MIN)
  704. return false;
  705. break;
  706. }
  707. }
  708. /* compute the size of the lists */
  709. a->list_size = num_requests + ESAS2R_LIST_EXTRA;
  710. /* allocate the inbound list */
  711. a->inbound_list_md.size = a->list_size *
  712. sizeof(struct
  713. esas2r_inbound_list_source_entry);
  714. if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
  715. esas2r_hdebug("failed to allocate IB list");
  716. return false;
  717. }
  718. /* allocate the outbound list */
  719. a->outbound_list_md.size = a->list_size *
  720. sizeof(struct atto_vda_ob_rsp);
  721. if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
  722. ESAS2R_LIST_ALIGN)) {
  723. esas2r_hdebug("failed to allocate IB list");
  724. return false;
  725. }
  726. /* allocate the NVRAM structure */
  727. a->nvram = (struct esas2r_sas_nvram *)high;
  728. high += sizeof(struct esas2r_sas_nvram);
  729. /* allocate the discovery buffer */
  730. a->disc_buffer = high;
  731. high += ESAS2R_DISC_BUF_LEN;
  732. high = PTR_ALIGN(high, 8);
  733. /* allocate the outbound list copy pointer */
  734. a->outbound_copy = (u32 volatile *)high;
  735. high += sizeof(u32);
  736. if (!test_bit(AF_NVR_VALID, &a->flags))
  737. esas2r_nvram_set_defaults(a);
  738. /* update the caller's uncached memory area pointer */
  739. *uncached_area = (void *)high;
  740. /* initialize the allocated memory */
  741. if (test_bit(AF_FIRST_INIT, &a->flags)) {
  742. esas2r_targ_db_initialize(a);
  743. /* prime parts of the inbound list */
  744. element =
  745. (struct esas2r_inbound_list_source_entry *)a->
  746. inbound_list_md.
  747. virt_addr;
  748. for (i = 0; i < a->list_size; i++) {
  749. element->address = 0;
  750. element->reserved = 0;
  751. element->length = cpu_to_le32(HWILSE_INTERFACE_F0
  752. | (sizeof(union
  753. atto_vda_req)
  754. /
  755. sizeof(u32)));
  756. element++;
  757. }
  758. /* init the AE requests */
  759. for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
  760. i++) {
  761. INIT_LIST_HEAD(&rq->req_list);
  762. if (!alloc_vda_req(a, rq)) {
  763. esas2r_hdebug(
  764. "failed to allocate a VDA request!");
  765. return false;
  766. }
  767. esas2r_rq_init_request(rq, a);
  768. /* override the completion function */
  769. rq->comp_cb = esas2r_ae_complete;
  770. }
  771. }
  772. return true;
  773. }
/*
 * This code will verify that the chip is operational.
 *
 * Brings the firmware message interface up: disables chip interrupts,
 * forces a doorbell interrupt and waits for the firmware to answer,
 * negotiates the doorbell API version (which selects the s/g list build
 * routine), tears down any stale message interface, then programs the
 * inbound/outbound communication list registers and waits for the
 * firmware to finish configuring its side.
 *
 * Returns true on success; on any handshake timeout or unknown firmware
 * version it returns the result of esas2r_set_degraded_mode() (false).
 * If AF_CHPRST_DETECTED is set the pre-reset handshakes are skipped and
 * only the communication lists are (re)programmed.
 */
bool esas2r_check_adapter(struct esas2r_adapter *a)
{
	u32 starttime;
	u32 doorbell;
	u64 ppaddr;
	u32 dw;

	/*
	 * if the chip reset detected flag is set, we can bypass a bunch of
	 * stuff.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags))
		goto skip_chip_reset;

	/*
	 * BEFORE WE DO ANYTHING, disable the chip interrupts!  the boot driver
	 * may have left them enabled or we may be recovering from a fault.
	 */
	esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
	esas2r_flush_register_dword(a, MU_INT_MASK_OUT);

	/*
	 * wait for the firmware to become ready by forcing an interrupt and
	 * waiting for a response.
	 */
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		esas2r_force_interrupt(a);
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF) {
			/*
			 * Give the firmware up to two seconds to enable
			 * register access after a reset.  All-ones reads
			 * mean the device is not responding on the bus yet.
			 */
			if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
				return esas2r_set_degraded_mode(a,
								"unable to access registers");
		} else if (doorbell & DRBL_FORCE_INT) {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/*
			 * This driver supports version 0 and version 1 of
			 * the API
			 */
			/* acknowledge the doorbell by writing the bits back */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);

			if (ver == DRBL_FW_VER_0) {
				/* legacy SGE descriptors, smaller VDA requests */
				set_bit(AF_LEGACY_SGE_MODE, &a->flags);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				/* PRD descriptors, larger VDA requests */
				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				return esas2r_set_degraded_mode(a,
								"unknown firmware version");
			}
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		/* overall firmware-ready timeout: three minutes */
		if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
			esas2r_hdebug("FW ready TMO");
			esas2r_bugon();

			return esas2r_set_degraded_mode(a,
							"firmware start has timed out");
		}
	}

	/* purge any asynchronous events since we will repost them later */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_DOWN) {
			/* firmware acknowledged the interface-down request */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(50));

		/* best-effort: continue even if the firmware never acks */
		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug("timeout waiting for interface down");
			break;
		}
	}
skip_chip_reset:
	/*
	 * first things first, before we go changing any of these registers
	 * disable the communication lists.
	 */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~MU_ILC_ENABLE;
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~MU_OLC_ENABLE;
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/* configure the communication list addresses */
	ppaddr = a->inbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	ppaddr = a->outbound_list_md.phys_addr;
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
				    upper_32_bits(ppaddr));
	/*
	 * the outbound list copy pointer lives in the uncached region;
	 * compute its physical address from its offset within that region.
	 */
	ppaddr = a->uncached_phys +
		 ((u8 *)a->outbound_copy - a->uncached);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
				    lower_32_bits(ppaddr));
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
				    upper_32_bits(ppaddr));

	/* reset the read and write pointers */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;
	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
				    a->last_write);
	esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
				    MU_OLW_TOGGLE | a->last_write);

	/* configure the interface select fields */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
	dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
	esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
				    (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
	dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
	esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
				    (dw | MU_OLIC_LIST_F0 |
				     MU_OLIC_SOURCE_DDR));

	/* finish configuring the communication lists */
	dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
	dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
	dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
	      | (a->list_size << MU_ILC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
	dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
	dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
	dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
	esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);

	/*
	 * notify the firmware that we're done setting up the communication
	 * list registers.  wait here until the firmware is done configuring
	 * its lists.  it will signal that it is done by enabling the lists.
	 */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
	starttime = jiffies_to_msecs(jiffies);

	while (true) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & DRBL_MSG_IFC_INIT) {
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			break;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
			esas2r_hdebug(
				"timeout waiting for communication list init");
			esas2r_bugon();
			return esas2r_set_degraded_mode(a,
							"timeout waiting for communication list init");
		}
	}

	/*
	 * flag whether the firmware supports the power down doorbell.  we
	 * determine this by reading the inbound doorbell enable mask.
	 */
	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
	if (doorbell & DRBL_POWER_DOWN)
		set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
	else
		clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);

	/*
	 * enable assertion of outbound queue and doorbell interrupts in the
	 * main interrupt cause register.
	 */
	esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
	esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
	return true;
}
/*
 * Process the initialization message just completed and format the next one.
 *
 * This is a small state machine driven by a->init_msg.  Each call
 * consumes the current state (and, for the non-START states, the result
 * of the request just completed in @rq), then either formats the next
 * message into @rq and returns true (caller sends it), or returns false
 * to indicate the init sequence is finished.  a->init_msg is advanced to
 * the state that should be processed after @rq completes.
 */
static bool esas2r_format_init_msg(struct esas2r_adapter *a,
				   struct esas2r_request *rq)
{
	u32 msg = a->init_msg;
	struct atto_vda_cfg_init *ci;

	/* consume the state; the cases below set the next one as needed */
	a->init_msg = 0;

	switch (msg) {
	case ESAS2R_INIT_MSG_START:
	case ESAS2R_INIT_MSG_REINIT:
	{
		esas2r_hdebug("CFG init");
		esas2r_build_cfg_req(a,
				     rq,
				     VDA_CFG_INIT,
				     0,
				     NULL);
		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
		/* firmware interface overflows in y2106 */
		ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
		rq->flags |= RF_FAILURE_OK;
		a->init_msg = ESAS2R_INIT_MSG_INIT;
		break;
	}

	case ESAS2R_INIT_MSG_INIT:
		if (rq->req_stat == RS_SUCCESS) {
			u32 major;
			u32 minor;
			u16 fw_release;

			/* capture firmware identity from the CFG_INIT reply */
			a->fw_version = le16_to_cpu(
				rq->func_rsp.cfg_rsp.vda_version);
			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
			fw_release = le16_to_cpu(
				rq->func_rsp.cfg_rsp.fw_release);
			major = LOBYTE(fw_release);
			minor = HIBYTE(fw_release);
			/* pack release major/minor into the upper bytes */
			a->fw_version += (major << 16) + (minor << 24);
		} else {
			esas2r_hdebug("FAILED");
		}

		/*
		 * the 2.71 and earlier releases of R6xx firmware did not error
		 * unsupported config requests correctly.
		 */
		if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {
			esas2r_hdebug("CFG get init");
			esas2r_build_cfg_req(a,
					     rq,
					     VDA_CFG_GET_INIT2,
					     sizeof(struct atto_vda_cfg_init),
					     NULL);

			/* one PRD entry placed directly after the VDA request */
			rq->vrq->cfg.sg_list_offset = offsetof(
				struct atto_vda_cfg_req,
				data.sge);
			rq->vrq->cfg.data.prde.ctl_len =
				cpu_to_le32(sizeof(struct atto_vda_cfg_init));
			rq->vrq->cfg.data.prde.address = cpu_to_le64(
				rq->vrq_md->phys_addr +
				sizeof(union atto_vda_req));
			rq->flags |= RF_FAILURE_OK;
			a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
			break;
		}
		/* firmware too old for GET_INIT2 - finish the sequence */
		fallthrough;

	case ESAS2R_INIT_MSG_GET_INIT:
		/* only harvest the reply if we actually sent GET_INIT2 */
		if (msg == ESAS2R_INIT_MSG_GET_INIT) {
			ci = (struct atto_vda_cfg_init *)rq->data_buf;
			if (rq->req_stat == RS_SUCCESS) {
				a->num_targets_backend =
					le32_to_cpu(ci->num_targets_backend);
				a->ioctl_tunnel =
					le32_to_cpu(ci->ioctl_tunnel);
			} else {
				esas2r_hdebug("FAILED");
			}
		}
		fallthrough;

	default:
		/* sequence complete; report overall success to the caller */
		rq->req_stat = RS_SUCCESS;
		return false;
	}
	return true;
}
  1041. /*
  1042. * Perform initialization messages via the request queue. Messages are
  1043. * performed with interrupts disabled.
  1044. */
  1045. bool esas2r_init_msgs(struct esas2r_adapter *a)
  1046. {
  1047. bool success = true;
  1048. struct esas2r_request *rq = &a->general_req;
  1049. esas2r_rq_init_request(rq, a);
  1050. rq->comp_cb = esas2r_dummy_complete;
  1051. if (a->init_msg == 0)
  1052. a->init_msg = ESAS2R_INIT_MSG_REINIT;
  1053. while (a->init_msg) {
  1054. if (esas2r_format_init_msg(a, rq)) {
  1055. unsigned long flags;
  1056. while (true) {
  1057. spin_lock_irqsave(&a->queue_lock, flags);
  1058. esas2r_start_vda_request(a, rq);
  1059. spin_unlock_irqrestore(&a->queue_lock, flags);
  1060. esas2r_wait_request(a, rq);
  1061. if (rq->req_stat != RS_PENDING)
  1062. break;
  1063. }
  1064. }
  1065. if (rq->req_stat == RS_SUCCESS
  1066. || ((rq->flags & RF_FAILURE_OK)
  1067. && rq->req_stat != RS_TIMEOUT))
  1068. continue;
  1069. esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
  1070. a->init_msg, rq->req_stat, rq->flags);
  1071. a->init_msg = ESAS2R_INIT_MSG_START;
  1072. success = false;
  1073. break;
  1074. }
  1075. esas2r_rq_destroy_request(rq, a);
  1076. return success;
  1077. }
/*
 * Initialize the adapter chip.
 *
 * Runs the firmware init message sequence, posts the async event
 * requests, reads firmware/flash identity strings, and kicks off target
 * discovery.  When @init_poll is set, device discovery is polled inline
 * here (simulating timer ticks) instead of being driven by interrupts.
 * On failure the adapter is left in degraded mode.  Returns true on
 * success.
 */
bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
{
	bool rslt = false;
	struct esas2r_request *rq;
	u32 i;

	/* nothing to do if we already gave up on this adapter */
	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		goto exit;

	/* fall back to reading NVRAM directly if it was not validated yet */
	if (!test_bit(AF_NVR_VALID, &a->flags)) {
		if (!esas2r_nvram_read_direct(a))
			esas2r_log(ESAS2R_LOG_WARN,
				   "invalid/missing NVRAM parameters");
	}

	if (!esas2r_init_msgs(a)) {
		esas2r_set_degraded_mode(a, "init messages failed");
		goto exit;
	}

	/* The firmware is ready. */
	clear_bit(AF_DEGRADED_MODE, &a->flags);
	clear_bit(AF_CHPRST_PENDING, &a->flags);

	/* Post all the async event requests */
	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
		esas2r_start_ae_request(a, rq);

	/* read identity strings only once (empty string means unread) */
	if (!a->flash_rev[0])
		esas2r_read_flash_rev(a);

	if (!a->image_type[0])
		esas2r_read_image_type(a);

	/* format "major.minor" from the packed fw_version bytes */
	if (a->fw_version == 0)
		a->fw_rev[0] = 0;
	else
		sprintf(a->fw_rev, "%1d.%02d",
			(int)LOBYTE(HIWORD(a->fw_version)),
			(int)HIBYTE(HIWORD(a->fw_version)));

	esas2r_hdebug("firmware revision: %s", a->fw_rev);

	/*
	 * A chip reset during the first init: re-enable interrupts and let
	 * the original initialization's device polling continue.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags)
	    && (test_bit(AF_FIRST_INIT, &a->flags))) {
		esas2r_enable_chip_interrupts(a);
		return true;
	}

	/* initialize discovery */
	esas2r_disc_initialize(a);

	/*
	 * wait for the device wait time to expire here if requested.  this is
	 * usually requested during initial driver load and possibly when
	 * resuming from a low power state.  deferred device waiting will use
	 * interrupts.  chip reset recovery always defers device waiting to
	 * avoid being in a TASKLET too long.
	 */
	if (init_poll) {
		u32 currtime = a->disc_start_time;
		u32 nexttick = 100;
		u32 deltatime;

		/*
		 * Block Tasklets from getting scheduled and indicate this is
		 * polled discovery.
		 */
		set_bit(AF_TASKLET_SCHEDULED, &a->flags);
		set_bit(AF_DISC_POLLED, &a->flags);

		/*
		 * Temporarily bring the disable count to zero to enable
		 * deferred processing.  Note that the count is already zero
		 * after the first initialization.
		 */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_dec(&a->disable_cnt);

		while (test_bit(AF_DISC_PENDING, &a->flags)) {
			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/*
			 * Determine the need for a timer tick based on the
			 * delta time between this and the last iteration of
			 * this loop.  We don't use the absolute time because
			 * then we would have to worry about when nexttick
			 * wraps and currtime hasn't yet.
			 */
			deltatime = jiffies_to_msecs(jiffies) - currtime;
			currtime += deltatime;

			/*
			 * Process any waiting discovery as long as the chip is
			 * up.  If a chip reset happens during initial polling,
			 * we have to make sure the timer tick processes the
			 * doorbell indicating the firmware is ready.
			 */
			if (!test_bit(AF_CHPRST_PENDING, &a->flags))
				esas2r_disc_check_for_work(a);

			/* Simulate a timer tick. */
			if (nexttick <= deltatime) {
				/* Time for a timer tick */
				nexttick += 100;
				esas2r_timer_tick(a);
			}

			if (nexttick > deltatime)
				nexttick -= deltatime;

			/* Do any deferred processing */
			if (esas2r_is_tasklet_pending(a))
				esas2r_do_tasklet_tasks(a);
		}

		/* restore the disable count we borrowed above */
		if (test_bit(AF_FIRST_INIT, &a->flags))
			atomic_inc(&a->disable_cnt);

		clear_bit(AF_DISC_POLLED, &a->flags);
		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
	}

	esas2r_targ_db_report_changes(a);

	/*
	 * For cases where (a) the initialization messages processing may
	 * handle an interrupt for a port event and a discovery is waiting, but
	 * we are not waiting for devices, or (b) the device wait time has been
	 * exhausted but there is still discovery pending, start any leftover
	 * discovery in interrupt driven mode.
	 */
	esas2r_disc_start_waiting(a);

	/* Enable chip interrupts */
	a->int_mask = ESAS2R_INT_STS_MASK;
	esas2r_enable_chip_interrupts(a);
	esas2r_enable_heartbeat(a);
	rslt = true;

exit:
	/*
	 * Regardless of whether initialization was successful, certain things
	 * need to get done before we exit.
	 */
	if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
	    test_bit(AF_FIRST_INIT, &a->flags)) {
		/*
		 * Reinitialization was performed during the first
		 * initialization.  Only clear the chip reset flag so the
		 * original device polling is not cancelled.
		 */
		if (!rslt)
			clear_bit(AF_CHPRST_PENDING, &a->flags);
	} else {
		/* First initialization or a subsequent re-init is complete. */
		if (!rslt) {
			clear_bit(AF_CHPRST_PENDING, &a->flags);
			clear_bit(AF_DISC_PENDING, &a->flags);
		}

		/* Enable deferred processing after the first initialization. */
		if (test_bit(AF_FIRST_INIT, &a->flags)) {
			clear_bit(AF_FIRST_INIT, &a->flags);

			if (atomic_dec_return(&a->disable_cnt) == 0)
				esas2r_do_deferred_processes(a);
		}
	}

	return rslt;
}
/*
 * Request a full adapter reset on behalf of the OS: mark the reset as
 * host-initiated, perform the local reset, then schedule the tasklet so
 * the deferred recovery work runs.
 */
void esas2r_reset_adapter(struct esas2r_adapter *a)
{
	/* flag must be set before the reset so recovery knows the origin */
	set_bit(AF_OS_RESET, &a->flags);
	esas2r_local_reset_adapter(a);
	esas2r_schedule_tasklet(a);
}
/*
 * Hard-reset the adapter chip, preserving any available firmware core
 * dump first.  No-op if the adapter has fallen off the bus.
 */
void esas2r_reset_chip(struct esas2r_adapter *a)
{
	if (!esas2r_is_adapter_present(a))
		return;

	/*
	 * Before we reset the chip, save off the VDA core dump.  The VDA core
	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure
	 * to not overwrite a previous crash that was saved.
	 */
	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
		esas2r_read_mem_block(a,
				      a->fw_coredump_buff,
				      MW_DATA_ADDR_SRAM + 0x80000,
				      ESAS2R_FWCOREDUMP_SZ);

		set_bit(AF2_COREDUMP_SAVED, &a->flags2);
	}

	/* the dump (if any) is gone once the chip resets */
	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);

	/* Reset the chip; the B2 stepping uses a different reset register */
	if (a->pcid->revision == MVR_FREY_B2)
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
					    MU_CTL_IN_FULL_RST2);
	else
		esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
					    MU_CTL_IN_FULL_RST);

	/* Stall a little while to let the reset condition clear */
	mdelay(10);
}
  1256. static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
  1257. {
  1258. u32 starttime;
  1259. u32 doorbell;
  1260. esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
  1261. starttime = jiffies_to_msecs(jiffies);
  1262. while (true) {
  1263. doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
  1264. if (doorbell & DRBL_POWER_DOWN) {
  1265. esas2r_write_register_dword(a, MU_DOORBELL_OUT,
  1266. doorbell);
  1267. break;
  1268. }
  1269. schedule_timeout_interruptible(msecs_to_jiffies(100));
  1270. if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
  1271. esas2r_hdebug("Timeout waiting for power down");
  1272. break;
  1273. }
  1274. }
  1275. }
/*
 * Perform power management processing including managing device states, adapter
 * states, interrupts, and I/O.
 *
 * Quiesces the adapter for a power-down: disables interrupts and the
 * heartbeat, brings the firmware message interface down, optionally
 * notifies firmware of the power-down, then suspends I/O and removes
 * all target devices from the target database.
 */
void esas2r_power_down(struct esas2r_adapter *a)
{
	set_bit(AF_POWER_MGT, &a->flags);
	set_bit(AF_POWER_DOWN, &a->flags);

	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
		u32 starttime;
		u32 doorbell;

		/*
		 * We are currently running OK and will be reinitializing later.
		 * increment the disable count to coordinate with
		 * esas2r_init_adapter.  We don't have to do this in degraded
		 * mode since we never enabled interrupts in the first place.
		 */
		esas2r_disable_chip_interrupts(a);
		esas2r_disable_heartbeat(a);

		/* wait for any VDA activity to clear before continuing */
		esas2r_write_register_dword(a, MU_DOORBELL_IN,
					    DRBL_MSG_IFC_DOWN);
		starttime = jiffies_to_msecs(jiffies);

		while (true) {
			doorbell =
				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
			if (doorbell & DRBL_MSG_IFC_DOWN) {
				/* firmware acknowledged the interface-down */
				esas2r_write_register_dword(a, MU_DOORBELL_OUT,
							    doorbell);
				break;
			}

			schedule_timeout_interruptible(msecs_to_jiffies(100));

			/* best-effort: continue even without an ack */
			if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
				esas2r_hdebug(
					"timeout waiting for interface down");
				break;
			}
		}

		/*
		 * For versions of firmware that support it tell them the driver
		 * is powering down.
		 */
		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
			esas2r_power_down_notify_firmware(a);
	}

	/* Suspend I/O processing. */
	set_bit(AF_OS_RESET, &a->flags);
	set_bit(AF_DISC_PENDING, &a->flags);
	set_bit(AF_CHPRST_PENDING, &a->flags);

	esas2r_process_adapter_reset(a);

	/* Remove devices now that I/O is cleaned up. */
	a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
	esas2r_targ_db_remove_all(a, false);
}
/*
 * Perform power management processing including managing device states, adapter
 * states, interrupts, and I/O.
 *
 * Re-initializes the adapter after a power-down (resume path): restores
 * PCI config space, re-runs the full chip check and hardware init, and
 * posts the reset asynchronous event.  Returns true only if both
 * esas2r_check_adapter() and esas2r_init_adapter_hw() succeed.
 */
bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
{
	bool ret;

	clear_bit(AF_POWER_DOWN, &a->flags);
	esas2r_init_pci_cfg_space(a);
	/* behave like a first init: poll devices and defer processing */
	set_bit(AF_FIRST_INIT, &a->flags);
	atomic_inc(&a->disable_cnt);

	/* reinitialize the adapter */
	ret = esas2r_check_adapter(a);
	if (!esas2r_init_adapter_hw(a, init_poll))
		ret = false;

	/* send the reset asynchronous event */
	esas2r_send_reset_ae(a, true);

	/* clear this flag after initialization. */
	clear_bit(AF_POWER_MGT, &a->flags);
	return ret;
}
  1351. bool esas2r_is_adapter_present(struct esas2r_adapter *a)
  1352. {
  1353. if (test_bit(AF_NOT_PRESENT, &a->flags))
  1354. return false;
  1355. if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
  1356. set_bit(AF_NOT_PRESENT, &a->flags);
  1357. return false;
  1358. }
  1359. return true;
  1360. }
  1361. const char *esas2r_get_model_name(struct esas2r_adapter *a)
  1362. {
  1363. switch (a->pcid->subsystem_device) {
  1364. case ATTO_ESAS_R680:
  1365. return "ATTO ExpressSAS R680";
  1366. case ATTO_ESAS_R608:
  1367. return "ATTO ExpressSAS R608";
  1368. case ATTO_ESAS_R60F:
  1369. return "ATTO ExpressSAS R60F";
  1370. case ATTO_ESAS_R6F0:
  1371. return "ATTO ExpressSAS R6F0";
  1372. case ATTO_ESAS_R644:
  1373. return "ATTO ExpressSAS R644";
  1374. case ATTO_ESAS_R648:
  1375. return "ATTO ExpressSAS R648";
  1376. case ATTO_TSSC_3808:
  1377. return "ATTO ThunderStream SC 3808D";
  1378. case ATTO_TSSC_3808E:
  1379. return "ATTO ThunderStream SC 3808E";
  1380. case ATTO_TLSH_1068:
  1381. return "ATTO ThunderLink SH 1068";
  1382. }
  1383. return "ATTO SAS Controller";
  1384. }
  1385. const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
  1386. {
  1387. switch (a->pcid->subsystem_device) {
  1388. case ATTO_ESAS_R680:
  1389. return "R680";
  1390. case ATTO_ESAS_R608:
  1391. return "R608";
  1392. case ATTO_ESAS_R60F:
  1393. return "R60F";
  1394. case ATTO_ESAS_R6F0:
  1395. return "R6F0";
  1396. case ATTO_ESAS_R644:
  1397. return "R644";
  1398. case ATTO_ESAS_R648:
  1399. return "R648";
  1400. case ATTO_TSSC_3808:
  1401. return "SC 3808D";
  1402. case ATTO_TSSC_3808E:
  1403. return "SC 3808E";
  1404. case ATTO_TLSH_1068:
  1405. return "SH 1068";
  1406. }
  1407. return "unknown";
  1408. }