/* drivers/s390/cio/device_status.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright IBM Corp. 2002
  4. * Author(s): Cornelia Huck ([email protected])
  5. * Martin Schwidefsky ([email protected])
  6. *
  7. * Status accumulation and basic sense functions.
  8. */
  9. #include <linux/module.h>
  10. #include <linux/init.h>
  11. #include <asm/ccwdev.h>
  12. #include <asm/cio.h>
  13. #include "cio.h"
  14. #include "cio_debug.h"
  15. #include "css.h"
  16. #include "device.h"
  17. #include "ioasm.h"
  18. #include "io_sch.h"
  19. /*
  20. * Check for any kind of channel or interface control check but don't
  21. * issue the message for the console device
  22. */
  23. static void
  24. ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
  25. {
  26. struct subchannel *sch = to_subchannel(cdev->dev.parent);
  27. char dbf_text[15];
  28. if (!scsw_is_valid_cstat(&irb->scsw) ||
  29. !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
  30. SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
  31. return;
  32. CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
  33. "received"
  34. " ... device %04x on subchannel 0.%x.%04x, dev_stat "
  35. ": %02X sch_stat : %02X\n",
  36. cdev->private->dev_id.devno, sch->schid.ssid,
  37. sch->schid.sch_no,
  38. scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
  39. sprintf(dbf_text, "chk%x", sch->schid.sch_no);
  40. CIO_TRACE_EVENT(0, dbf_text);
  41. CIO_HEX_EVENT(0, irb, sizeof(struct irb));
  42. }
  43. /*
  44. * Some paths became not operational (pno bit in scsw is set).
  45. */
  46. static void
  47. ccw_device_path_notoper(struct ccw_device *cdev)
  48. {
  49. struct subchannel *sch;
  50. sch = to_subchannel(cdev->dev.parent);
  51. if (cio_update_schib(sch))
  52. goto doverify;
  53. CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
  54. "not operational \n", __func__,
  55. sch->schid.ssid, sch->schid.sch_no,
  56. sch->schib.pmcw.pnom);
  57. sch->lpm &= ~sch->schib.pmcw.pnom;
  58. doverify:
  59. cdev->private->flags.doverify = 1;
  60. }
  61. /*
  62. * Copy valid bits from the extended control word to device irb.
  63. */
  64. static void
  65. ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
  66. {
  67. /*
  68. * Copy extended control bit if it is valid... yes there
  69. * are condition that have to be met for the extended control
  70. * bit to have meaning. Sick.
  71. */
  72. cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
  73. if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
  74. !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
  75. cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
  76. /* Check if extended control word is valid. */
  77. if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
  78. return;
  79. /* Copy concurrent sense / model dependent information. */
  80. memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
  81. }
  82. /*
  83. * Check if extended status word is valid.
  84. */
  85. static int
  86. ccw_device_accumulate_esw_valid(struct irb *irb)
  87. {
  88. if (!irb->scsw.cmd.eswf &&
  89. (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
  90. return 0;
  91. if (irb->scsw.cmd.stctl ==
  92. (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
  93. !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
  94. return 0;
  95. return 1;
  96. }
  97. /*
  98. * Copy valid bits from the extended status word to device irb.
  99. */
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;
	struct sublog *cdev_sublog, *sublog;

	/* Nothing to accumulate unless the esw contents are valid. */
	if (!ccw_device_accumulate_esw_valid(irb))
		return;

	cdev_irb = &cdev->private->dma_area->irb;

	/* Copy last path used mask. */
	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;

	/* Copy subchannel logout information if esw is of format 0. */
	if (irb->scsw.cmd.eswf) {
		cdev_sublog = &cdev_irb->esw.esw0.sublog;
		sublog = &irb->esw.esw0.sublog;
		/* Copy extended status flags. */
		cdev_sublog->esf = sublog->esf;
		/*
		 * Copy fields that have a meaning for channel data check
		 * channel control check and interface control check.
		 */
		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
					   SCHN_STAT_CHN_CTRL_CHK |
					   SCHN_STAT_INTF_CTRL_CHK)) {
			/* Copy ancillary report bit. */
			cdev_sublog->arep = sublog->arep;
			/* Copy field-validity-flags. */
			cdev_sublog->fvf = sublog->fvf;
			/* Copy storage access code. */
			cdev_sublog->sacc = sublog->sacc;
			/* Copy termination code. */
			cdev_sublog->termc = sublog->termc;
			/* Copy sequence code. */
			cdev_sublog->seqc = sublog->seqc;
		}
		/* Copy device status check. */
		cdev_sublog->devsc = sublog->devsc;
		/* Copy secondary error. */
		cdev_sublog->serr = sublog->serr;
		/* Copy i/o-error alert. */
		cdev_sublog->ioerr = sublog->ioerr;
		/* Copy channel path timeout bit, only meaningful on an
		 * interface control check. */
		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
		/* Copy failing storage address validity flag. */
		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
		if (cdev_irb->esw.esw0.erw.fsavf) {
			/* ... and copy the failing storage address. */
			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
			       sizeof (irb->esw.esw0.faddr));
			/* ... and copy the failing storage address format. */
			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
		}
		/* Copy secondary ccw address validity bit. */
		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
		if (irb->esw.esw0.erw.scavf)
			/* ... and copy the secondary ccw address. */
			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
	}
	/* FIXME: DCTI for format 2? */
	/* Copy authorization bit. */
	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
	/* Copy path verification required flag ... */
	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
	if (irb->esw.esw0.erw.pvrf)
		/* ... and remember to redo path verification later. */
		cdev->private->flags.doverify = 1;
	/* Copy concurrent sense bit ... */
	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
	if (irb->esw.esw0.erw.cons)
		/* ... and the count of concurrently stored sense bytes. */
		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}
  170. /*
  171. * Accumulate status from irb to devstat.
  172. */
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;

	/*
	 * Check if the status pending bit is set in stctl.
	 * If not, the remaining bit have no meaning and we must ignore them.
	 * The esw is not meaningful as well...
	 */
	if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
		return;

	/* Check for channel checks and interface control checks. */
	ccw_device_msg_control_check(cdev, irb);

	/* Check for path not operational. */
	if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
		ccw_device_path_notoper(cdev);

	/* No irb accumulation for transport mode irbs: pass it through
	 * wholesale instead. */
	if (scsw_is_tm(&irb->scsw)) {
		memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
		return;
	}

	/*
	 * Don't accumulate unsolicited interrupts.
	 */
	if (!scsw_is_solicited(&irb->scsw))
		return;

	cdev_irb = &cdev->private->dma_area->irb;

	/*
	 * If the clear function had been performed, all formerly pending
	 * status at the subchannel has been cleared and we must not pass
	 * intermediate accumulated status to the device driver.
	 */
	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	/* Copy bits which are valid only for the start function. */
	if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
		/* Copy key. */
		cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
		/* Copy suspend control bit. */
		cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
		/* Accumulate deferred condition code. */
		cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
		/* Copy ccw format bit. */
		cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
		/* Copy prefetch bit. */
		cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
		/* Copy initial-status-interruption-control. */
		cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
		/* Copy address limit checking control. */
		cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
		/* Copy suppress suspend bit. */
		cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
	}

	/* Take care of the extended control bit and extended control word. */
	ccw_device_accumulate_ecw(cdev, irb);

	/* Accumulate function control. */
	cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
	/* Copy activity control (latest value wins, not accumulated). */
	cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
	/* Accumulate status control. */
	cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
	/*
	 * Copy ccw address if it is valid. This is a bit simplified
	 * but should be close enough for all practical purposes.
	 */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
	    ((irb->scsw.cmd.stctl ==
	      (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
	     (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
	     (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
	    (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
	/* Accumulate device status, but not the device busy flag. */
	cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
	/* dstat is not always valid. */
	if (irb->scsw.cmd.stctl &
	    (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
	     | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
		cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
	/* Accumulate subchannel status. */
	cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
	/* Copy residual count if it is valid, i.e. primary status is
	 * present and no subchannel-status bit other than PCI or
	 * incorrect length is set. */
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	    (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
	     == 0)
		cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
	/* Take care of bits in the extended status word. */
	ccw_device_accumulate_esw(cdev, irb);

	/*
	 * Check whether we must issue a SENSE CCW ourselves if there is no
	 * concurrent sense facility installed for the subchannel.
	 * No sense is required if no delayed sense is pending
	 * and we did not get a unit check without sense information.
	 *
	 * Note: We should check for ioinfo[irq]->flags.consns but VM
	 * violates the ESA/390 architecture and doesn't present an
	 * operand exception for virtual devices without concurrent
	 * sense facility available/supported when enabling the
	 * concurrent sense facility.
	 */
	if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
	    !(cdev_irb->esw.esw0.erw.cons))
		cdev->private->flags.dosense = 1;
}
  277. /*
  278. * Do a basic sense.
  279. */
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
	struct subchannel *sch;
	struct ccw1 *sense_ccw;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	/* A sense is required, can we do it now ? */
	if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
		/*
		 * we received an Unit Check but we have no final
		 * status yet, therefore we must delay the SENSE
		 * processing. We must not report this intermediate
		 * status to the device interrupt handler.
		 */
		return -EBUSY;

	/*
	 * We have ending status but no sense information. Do a basic sense.
	 */
	sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
	/* Sense data is stored into the ecw area of the per-device irb. */
	sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
	sense_ccw->count = SENSE_MAX_COUNT;
	/* CCW_FLAG_SLI: a short sense transfer is not treated as an error. */
	sense_ccw->flags = CCW_FLAG_SLI;

	/* Start the sense channel program on any path (lpm mask 0xff). */
	rc = cio_start(sch, sense_ccw, 0xff);
	if (rc == -ENODEV || rc == -EACCES)
		/* Device/path trouble: kick the fsm into verification. */
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return rc;
}
  309. /*
  310. * Add information from basic sense to devstat.
  311. */
  312. void
  313. ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
  314. {
  315. /*
  316. * Check if the status pending bit is set in stctl.
  317. * If not, the remaining bit have no meaning and we must ignore them.
  318. * The esw is not meaningful as well...
  319. */
  320. if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
  321. return;
  322. /* Check for channel checks and interface control checks. */
  323. ccw_device_msg_control_check(cdev, irb);
  324. /* Check for path not operational. */
  325. if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
  326. ccw_device_path_notoper(cdev);
  327. if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
  328. (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
  329. cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
  330. cdev->private->flags.dosense = 0;
  331. }
  332. /* Check if path verification is required. */
  333. if (ccw_device_accumulate_esw_valid(irb) &&
  334. irb->esw.esw0.erw.pvrf)
  335. cdev->private->flags.doverify = 1;
  336. }
  337. /*
  338. * This function accumulates the status into the private devstat and
  339. * starts a basic sense if one is needed.
  340. */
  341. int
  342. ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
  343. {
  344. ccw_device_accumulate_irb(cdev, irb);
  345. if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
  346. return -EBUSY;
  347. /* Check for basic sense. */
  348. if (cdev->private->flags.dosense &&
  349. !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
  350. cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
  351. cdev->private->flags.dosense = 0;
  352. return 0;
  353. }
  354. if (cdev->private->flags.dosense) {
  355. ccw_device_do_sense(cdev, irb);
  356. return -EBUSY;
  357. }
  358. return 0;
  359. }