aic94xx_tmf.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Aic94xx Task Management Functions
  4. *
  5. * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
  6. * Copyright (C) 2005 Luben Tuikov <[email protected]>
  7. */
  8. #include <linux/spinlock.h>
  9. #include <linux/gfp.h>
  10. #include "aic94xx.h"
  11. #include "aic94xx_sas.h"
  12. #include "aic94xx_hwi.h"
  13. /* ---------- Internal enqueue ---------- */
  14. static int asd_enqueue_internal(struct asd_ascb *ascb,
  15. void (*tasklet_complete)(struct asd_ascb *,
  16. struct done_list_struct *),
  17. void (*timed_out)(struct timer_list *t))
  18. {
  19. int res;
  20. ascb->tasklet_complete = tasklet_complete;
  21. ascb->uldd_timer = 1;
  22. ascb->timer.function = timed_out;
  23. ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
  24. add_timer(&ascb->timer);
  25. res = asd_post_ascb_list(ascb->ha, ascb, 1);
  26. if (unlikely(res))
  27. del_timer(&ascb->timer);
  28. return res;
  29. }
/* ---------- CLEAR NEXUS ---------- */

/*
 * Result of an internally posted TMF/CLEAR NEXUS ascb.  Filled in by
 * the tasklet-complete or timeout callback and read by the issuer that
 * is blocked on the ascb's completion.
 */
struct tasklet_completion_status {
	int dl_opcode;	/* done-list opcode reported for this ascb */
	int tmf_state;	/* TMF response code, for SSP RESPONSE frames */
	u8 tag_valid:1;	/* 1 if @tag holds a valid task tag */
	__be16 tag;	/* tag of the task the TMF referred to */
};

/* Declare a zero-initialized tasklet_completion_status on the stack. */
#define DECLARE_TCS(tcs) \
	struct tasklet_completion_status tcs = { \
		.dl_opcode = 0, \
		.tmf_state = 0, \
		.tag_valid = 0, \
		.tag = 0, \
	}
/*
 * Done-list completion for CLEAR NEXUS ascbs: record the done-list
 * opcode for the waiter, wake it, and free the ascb.
 */
static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
					     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs = ascb->uldd_task;
	ASD_DPRINTK("%s: here\n", __func__);
	/*
	 * If the timer could not be deleted, the timeout handler has
	 * already fired (or is firing) and has signalled the waiter;
	 * do not complete or free a second time.
	 */
	if (!del_timer(&ascb->timer)) {
		ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
		return;
	}
	ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
	tcs->dl_opcode = dl->opcode;	/* hand the result to the waiter */
	complete(ascb->completion);
	asd_ascb_free(ascb);
}
/*
 * Timeout handler for CLEAR NEXUS ascbs: report failure and wake the
 * waiter.  The ascb itself is intentionally not freed here.
 */
static void asd_clear_nexus_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;
	ASD_DPRINTK("%s: here\n", __func__);
	tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
/*
 * Common prologue for the asd_clear_nexus_*() functions.  Requires a
 * local 'struct asd_ha_struct *asd_ha' in scope.  Allocates one ascb,
 * attaches the on-stack completion and status record, and leaves 'scb'
 * pointing at a CLEAR NEXUS SCB for the caller to fill in.
 */
#define CLEAR_NEXUS_PRE \
	struct asd_ascb *ascb; \
	struct scb *scb; \
	int res; \
	DECLARE_COMPLETION_ONSTACK(completion); \
	DECLARE_TCS(tcs); \
	\
	ASD_DPRINTK("%s: PRE\n", __func__); \
	res = 1; \
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb) \
		return -ENOMEM; \
	\
	ascb->completion = &completion; \
	ascb->uldd_task = &tcs; \
	scb = ascb->scb; \
	scb->header.opcode = CLEAR_NEXUS

/*
 * Common epilogue: post the SCB, wait for the completion (signalled by
 * either the tasklet-complete or the timeout callback), and map
 * TC_NO_ERROR to TMF_RESP_FUNC_COMPLETE.  On posting failure the ascb
 * is freed here.
 */
#define CLEAR_NEXUS_POST \
	ASD_DPRINTK("%s: POST\n", __func__); \
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout); \
	if (res) \
		goto out_err; \
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion); \
	res = tcs.dl_opcode; \
	if (res == TC_NO_ERROR) \
		res = TMF_RESP_FUNC_COMPLETE; \
	return res; \
out_err: \
	asd_ascb_free(ascb); \
	return res
/* Clear all nexuses on the whole adapter. */
int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_ADAPTER;
	CLEAR_NEXUS_POST;
}
/* Clear all nexuses on one port; phys are selected by the port's phy_mask. */
int asd_clear_nexus_port(struct asd_sas_port *port)
{
	struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_PORT;
	scb->clear_nexus.conn_mask = port->phy_mask;
	CLEAR_NEXUS_POST;
}
/* Phases of the I_T nexus clearing sequence used by asd_I_T_nexus_reset(). */
enum clear_nexus_phase {
	NEXUS_PHASE_PRE,	/* before reset: EXEC_Q | SUSPEND_TX */
	NEXUS_PHASE_POST,	/* after reset: SEND_Q | NOTINQ */
	NEXUS_PHASE_RESUME,	/* resume transmission: RESUME_TX */
};
/*
 * Clear the I_T nexus to @dev, with flags chosen per @phase of the
 * reset sequence (see enum clear_nexus_phase).
 */
static int asd_clear_nexus_I_T(struct domain_device *dev,
			       enum clear_nexus_phase phase)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T;
	switch (phase) {
	case NEXUS_PHASE_PRE:
		scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
		break;
	case NEXUS_PHASE_POST:
		scb->clear_nexus.flags = SEND_Q | NOTINQ;
		break;
	case NEXUS_PHASE_RESUME:
		scb->clear_nexus.flags = RESUME_TX;
	}
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
/*
 * Reset the I_T nexus to @dev: suspend TX and clear the execution
 * queue, reset the phy (link reset for SATA/STP, hard reset for SSP),
 * flush outstanding commands, then retry resuming the transmitter up
 * to three times.
 */
int asd_I_T_nexus_reset(struct domain_device *dev)
{
	int res, tmp_res, i;
	struct sas_phy *phy = sas_get_local_phy(dev);
	/* Standard mandates link reset for ATA (type 0) and
	 * hard reset for SSP (type 1) */
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			  (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
	/* send a hard reset */
	ASD_DPRINTK("sending %s reset to %s\n",
		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
	res = sas_phy_reset(phy, reset_type);
	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
		/* wait for the maximum settle time */
		msleep(500);
		/* clear all outstanding commands (keep nexus suspended) */
		asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
	}
	/* retry resuming the transmitter a few times */
	for (i = 0; i < 3; i++) {
		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
		if (tmp_res == TC_RESUME)
			goto out;
		msleep(500);
	}
	/* This is a bit of a problem: the sequencer is still suspended
	 * and is refusing to resume. Hope it will resume on a bigger hammer
	 * or the disk is lost */
	dev_printk(KERN_ERR, &phy->dev,
		   "Failed to resume nexus after reset 0x%x\n", tmp_res);
	res = TMF_RESP_FUNC_FAILED;
out:
	sas_put_local_phy(phy);
	return res;
}
/* Clear the I_T_L nexus: all tasks to one LUN of @dev. */
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_I_T_L;
	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
						   dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
/* Clear a single task, identified by its SSP task tag (I_T_L_Q nexus). */
static int asd_clear_nexus_tag(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;
	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TAG;
	memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
	scb->clear_nexus.ssp_task.tag = tascb->tag;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	CLEAR_NEXUS_POST;
}
/* Clear a single task, identified by its transaction context index. */
static int asd_clear_nexus_index(struct sas_task *task)
{
	struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
	struct asd_ascb *tascb = task->lldd_task;
	CLEAR_NEXUS_PRE;
	scb->clear_nexus.nexus = NEXUS_TRANS_CX;
	if (task->dev->tproto)
		scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
							   task->dev->lldd_dev);
	scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
	CLEAR_NEXUS_POST;
}
/* ---------- TMFs ---------- */

/* Timeout handler for TMF ascbs: report failure and wake the waiter. */
static void asd_tmf_timedout(struct timer_list *t)
{
	struct asd_ascb *ascb = from_timer(ascb, t, timer);
	struct tasklet_completion_status *tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf timed out\n");
	tcs->tmf_state = TMF_RESP_FUNC_FAILED;
	complete(ascb->completion);
}
/*
 * Extract the TMF response from the empty data buffer (EDB) referenced
 * by the done-list status block, and record the task tag in the ascb.
 *
 * Returns the SSP RESPONSE IU status (or the response-data code when
 * response data is present), or TMF_RESP_FUNC_FAILED if the escb for
 * this done-list entry cannot be found.
 */
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
				    struct done_list_struct *dl)
{
	struct asd_ha_struct *asd_ha = ascb->ha;
	unsigned long flags;
	struct tc_resp_sb_struct {
		__le16 index_escb;
		u8 len_lsb;
		u8 flags;
	} __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
	/* EDB id is encoded 1-based in bits 6:4 of the flags byte */
	int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
	struct asd_ascb *escb;
	struct asd_dma_tok *edb;
	struct ssp_frame_hdr *fh;
	struct ssp_response_iu *ru;
	int res = TMF_RESP_FUNC_FAILED;
	ASD_DPRINTK("tmf resp tasklet\n");
	/* look up the empty scb that received the response frame */
	spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
	escb = asd_tc_index_find(&asd_ha->seq,
				 (int)le16_to_cpu(resp_sb->index_escb));
	spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
	if (!escb) {
		ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
		return res;
	}
	edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
	/* task tag, read directly from the buffer at offset 4;
	 * see the disabled fh->tag alternative below */
	ascb->tag = *(__be16 *)(edb->vaddr+4);
	fh = edb->vaddr + 16;
	ru = edb->vaddr + 16 + sizeof(*fh);
	res = ru->status;
	if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA)
		res = ru->resp_data[3];
#if 0
	ascb->tag = fh->tag;
#endif
	ascb->tag_valid = 1;
	asd_invalidate_edb(escb, edb_id);
	return res;
}
/*
 * Done-list completion for TMF ascbs.  If the timer is already gone,
 * the timeout handler has signalled the waiter and owns the ascb, so
 * leave everything alone.
 */
static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
				     struct done_list_struct *dl)
{
	struct tasklet_completion_status *tcs;
	if (!del_timer(&ascb->timer))
		return;
	tcs = ascb->uldd_task;
	ASD_DPRINTK("tmf tasklet complete\n");
	tcs->dl_opcode = dl->opcode;
	if (dl->opcode == TC_SSP_RESP) {
		/* parse the RESPONSE IU and capture the task tag */
		tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
		tcs->tag_valid = ascb->tag_valid;
		tcs->tag = ascb->tag;
	}
	complete(ascb->completion);
	asd_ascb_free(ascb);
}
/*
 * Helper for asd_abort_task(): the task is not done, so clear it from
 * the sequencer — by tag when one is known, else by TC index — and wait
 * for the task to complete.  Returns TMF_RESP_FUNC_COMPLETE only if the
 * task reached the DONE state, TMF_RESP_FUNC_FAILED otherwise.
 */
static int asd_clear_nexus(struct sas_task *task)
{
	int res = TMF_RESP_FUNC_FAILED;
	int leftover;
	struct asd_ascb *tascb = task->lldd_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	tascb->completion = &completion;
	ASD_DPRINTK("task not done, clearing nexus\n");
	if (tascb->tag_valid)
		res = asd_clear_nexus_tag(task);
	else
		res = asd_clear_nexus_index(task);
	leftover = wait_for_completion_timeout(&completion,
					       AIC94XX_SCB_TIMEOUT);
	tascb->completion = NULL;
	ASD_DPRINTK("came back from clear nexus\n");
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (leftover < 1)	/* timed out waiting for the task */
		res = TMF_RESP_FUNC_FAILED;
	if (task->task_state_flags & SAS_TASK_STATE_DONE)
		res = TMF_RESP_FUNC_COMPLETE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	return res;
}
  299. /**
  300. * asd_abort_task -- ABORT TASK TMF
  301. * @task: the task to be aborted
  302. *
  303. * Before calling ABORT TASK the task state flags should be ORed with
  304. * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
  305. * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
  306. *
  307. * Implements the ABORT TASK TMF, I_T_L_Q nexus.
  308. * Returns: SAS TMF responses (see sas_task.h),
  309. * -ENOMEM,
  310. * -SAS_QUEUE_FULL.
  311. *
  312. * When ABORT TASK returns, the caller of ABORT TASK checks first the
  313. * task->task_state_flags, and then the return value of ABORT TASK.
  314. *
  315. * If the task has task state bit SAS_TASK_STATE_DONE set, then the
  316. * task was completed successfully prior to it being aborted. The
  317. * caller of ABORT TASK has responsibility to call task->task_done()
  318. * xor free the task, depending on their framework. The return code
  319. * is TMF_RESP_FUNC_FAILED in this case.
  320. *
  321. * Else the SAS_TASK_STATE_DONE bit is not set,
  322. * If the return code is TMF_RESP_FUNC_COMPLETE, then
  323. * the task was aborted successfully. The caller of
  324. * ABORT TASK has responsibility to call task->task_done()
  325. * to finish the task, xor free the task depending on their
  326. * framework.
  327. * else
  328. * the ABORT TASK returned some kind of error. The task
  329. * was _not_ cancelled. Nothing can be assumed.
  330. * The caller of ABORT TASK may wish to retry.
  331. */
  332. int asd_abort_task(struct sas_task *task)
  333. {
  334. struct asd_ascb *tascb = task->lldd_task;
  335. struct asd_ha_struct *asd_ha = tascb->ha;
  336. int res = 1;
  337. unsigned long flags;
  338. struct asd_ascb *ascb = NULL;
  339. struct scb *scb;
  340. int leftover;
  341. DECLARE_TCS(tcs);
  342. DECLARE_COMPLETION_ONSTACK(completion);
  343. DECLARE_COMPLETION_ONSTACK(tascb_completion);
  344. tascb->completion = &tascb_completion;
  345. spin_lock_irqsave(&task->task_state_lock, flags);
  346. if (task->task_state_flags & SAS_TASK_STATE_DONE) {
  347. spin_unlock_irqrestore(&task->task_state_lock, flags);
  348. res = TMF_RESP_FUNC_COMPLETE;
  349. ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
  350. goto out_done;
  351. }
  352. spin_unlock_irqrestore(&task->task_state_lock, flags);
  353. ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
  354. if (!ascb)
  355. return -ENOMEM;
  356. ascb->uldd_task = &tcs;
  357. ascb->completion = &completion;
  358. scb = ascb->scb;
  359. scb->header.opcode = SCB_ABORT_TASK;
  360. switch (task->task_proto) {
  361. case SAS_PROTOCOL_SATA:
  362. case SAS_PROTOCOL_STP:
  363. scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
  364. break;
  365. case SAS_PROTOCOL_SSP:
  366. scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
  367. scb->abort_task.proto_conn_rate |= task->dev->linkrate;
  368. break;
  369. case SAS_PROTOCOL_SMP:
  370. break;
  371. default:
  372. break;
  373. }
  374. if (task->task_proto == SAS_PROTOCOL_SSP) {
  375. scb->abort_task.ssp_frame.frame_type = SSP_TASK;
  376. memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
  377. task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
  378. memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
  379. task->dev->port->ha->hashed_sas_addr,
  380. HASHED_SAS_ADDR_SIZE);
  381. scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
  382. memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
  383. scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
  384. scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
  385. }
  386. scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
  387. scb->abort_task.conn_handle = cpu_to_le16(
  388. (u16)(unsigned long)task->dev->lldd_dev);
  389. scb->abort_task.retry_count = 1;
  390. scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
  391. scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
  392. res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
  393. asd_tmf_timedout);
  394. if (res)
  395. goto out_free;
  396. wait_for_completion(&completion);
  397. ASD_DPRINTK("tmf came back\n");
  398. tascb->tag = tcs.tag;
  399. tascb->tag_valid = tcs.tag_valid;
  400. spin_lock_irqsave(&task->task_state_lock, flags);
  401. if (task->task_state_flags & SAS_TASK_STATE_DONE) {
  402. spin_unlock_irqrestore(&task->task_state_lock, flags);
  403. res = TMF_RESP_FUNC_COMPLETE;
  404. ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
  405. goto out_done;
  406. }
  407. spin_unlock_irqrestore(&task->task_state_lock, flags);
  408. if (tcs.dl_opcode == TC_SSP_RESP) {
  409. /* The task to be aborted has been sent to the device.
  410. * We got a Response IU for the ABORT TASK TMF. */
  411. if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
  412. res = asd_clear_nexus(task);
  413. else
  414. res = tcs.tmf_state;
  415. } else if (tcs.dl_opcode == TC_NO_ERROR &&
  416. tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
  417. /* timeout */
  418. res = TMF_RESP_FUNC_FAILED;
  419. } else {
  420. /* In the following we assume that the managing layer
  421. * will _never_ make a mistake, when issuing ABORT
  422. * TASK.
  423. */
  424. switch (tcs.dl_opcode) {
  425. default:
  426. res = asd_clear_nexus(task);
  427. fallthrough;
  428. case TC_NO_ERROR:
  429. break;
  430. /* The task hasn't been sent to the device xor
  431. * we never got a (sane) Response IU for the
  432. * ABORT TASK TMF.
  433. */
  434. case TF_NAK_RECV:
  435. res = TMF_RESP_INVALID_FRAME;
  436. break;
  437. case TF_TMF_TASK_DONE: /* done but not reported yet */
  438. res = TMF_RESP_FUNC_FAILED;
  439. leftover =
  440. wait_for_completion_timeout(&tascb_completion,
  441. AIC94XX_SCB_TIMEOUT);
  442. spin_lock_irqsave(&task->task_state_lock, flags);
  443. if (leftover < 1)
  444. res = TMF_RESP_FUNC_FAILED;
  445. if (task->task_state_flags & SAS_TASK_STATE_DONE)
  446. res = TMF_RESP_FUNC_COMPLETE;
  447. spin_unlock_irqrestore(&task->task_state_lock, flags);
  448. break;
  449. case TF_TMF_NO_TAG:
  450. case TF_TMF_TAG_FREE: /* the tag is in the free list */
  451. case TF_TMF_NO_CONN_HANDLE: /* no such device */
  452. res = TMF_RESP_FUNC_COMPLETE;
  453. break;
  454. case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
  455. res = TMF_RESP_FUNC_ESUPP;
  456. break;
  457. }
  458. }
  459. out_done:
  460. tascb->completion = NULL;
  461. if (res == TMF_RESP_FUNC_COMPLETE) {
  462. task->lldd_task = NULL;
  463. mb();
  464. asd_ascb_free(tascb);
  465. }
  466. ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
  467. return res;
  468. out_free:
  469. asd_ascb_free(ascb);
  470. ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
  471. return res;
  472. }
/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
				int tmf, int index)
{
	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
	struct asd_ascb *ascb;
	int res = 1;
	struct scb *scb;
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);

	/* TMFs here are SSP-only */
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
	if (!ascb)
		return -ENOMEM;

	ascb->completion = &completion;
	ascb->uldd_task = &tcs;
	scb = ascb->scb;

	if (tmf == TMF_QUERY_TASK)
		scb->header.opcode = QUERY_SSP_TASK;
	else
		scb->header.opcode = INITIATE_SSP_TMF;

	scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
	scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
	/* SSP frame header */
	scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
	memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
	       dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
	/* SSP Task IU */
	memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
	scb->ssp_tmf.ssp_task.tmf = tmf;

	scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
	scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
					      dev->lldd_dev);
	scb->ssp_tmf.retry_count = 1;
	scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
	if (tmf == TMF_QUERY_TASK)
		scb->ssp_tmf.index = cpu_to_le16(index);

	res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
				   asd_tmf_timedout);
	if (res)
		goto out_err;
	wait_for_completion(&completion);

	/* map the done-list opcode to a TMF response code */
	switch (tcs.dl_opcode) {
	case TC_NO_ERROR:
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_NAK_RECV:
		res = TMF_RESP_INVALID_FRAME;
		break;
	case TF_TMF_TASK_DONE:
		res = TMF_RESP_FUNC_FAILED;
		break;
	case TF_TMF_NO_TAG:
	case TF_TMF_TAG_FREE:	/* the tag is in the free list */
	case TF_TMF_NO_CONN_HANDLE: /* no such device */
		res = TMF_RESP_FUNC_COMPLETE;
		break;
	case TF_TMF_NO_CTX:	/* not in seq, or proto != SSP */
		res = TMF_RESP_FUNC_ESUPP;
		break;
	default:
		/* Allow TMF response codes to propagate upwards */
		res = tcs.dl_opcode;
		break;
	}
	return res;
out_err:
	asd_ascb_free(ascb);
	return res;
}
  561. int asd_abort_task_set(struct domain_device *dev, u8 *lun)
  562. {
  563. int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
  564. if (res == TMF_RESP_FUNC_COMPLETE)
  565. asd_clear_nexus_I_T_L(dev, lun);
  566. return res;
  567. }
  568. int asd_clear_task_set(struct domain_device *dev, u8 *lun)
  569. {
  570. int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
  571. if (res == TMF_RESP_FUNC_COMPLETE)
  572. asd_clear_nexus_I_T_L(dev, lun);
  573. return res;
  574. }
  575. int asd_lu_reset(struct domain_device *dev, u8 *lun)
  576. {
  577. int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
  578. if (res == TMF_RESP_FUNC_COMPLETE)
  579. asd_clear_nexus_I_T_L(dev, lun);
  580. return res;
  581. }
  582. /**
  583. * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
  584. * @task: pointer to sas_task struct of interest
  585. *
  586. * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
  587. * or TMF_RESP_FUNC_SUCC if the task is in the task set.
  588. *
  589. * Normally the management layer sets the task to aborted state,
  590. * and then calls query task and then abort task.
  591. */
  592. int asd_query_task(struct sas_task *task)
  593. {
  594. struct asd_ascb *ascb = task->lldd_task;
  595. int index;
  596. if (ascb) {
  597. index = ascb->tc_index;
  598. return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
  599. TMF_QUERY_TASK, index);
  600. }
  601. return TMF_RESP_FUNC_COMPLETE;
  602. }