// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);
/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_aqic(). Based on the return
 * value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 *
 * Return: 0 on success, -EOPNOTSUPP if the queue cannot do interrupts,
 * -EBUSY if the request should be retried later.
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	struct ap_queue_status status;
	struct ap_qirq_ctrl qirqctrl = { 0 };

	/* Request interruption via our ISC, with @ind as indicator byte. */
	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		/* Permanent failure for this queue - report it. */
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		/* Transient condition - caller may retry. */
		return -EBUSY;
	}
}
/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  int special)
{
	if (special)
		/* Set the special-command bit in the queue id. */
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, length);
}
  71. int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
  72. {
  73. struct ap_queue_status status;
  74. status = __ap_send(qid, psmid, msg, length, 0);
  75. switch (status.response_code) {
  76. case AP_RESPONSE_NORMAL:
  77. return 0;
  78. case AP_RESPONSE_Q_FULL:
  79. case AP_RESPONSE_RESET_IN_PROGRESS:
  80. return -EBUSY;
  81. case AP_RESPONSE_REQ_FAC_NOT_INST:
  82. return -EINVAL;
  83. default: /* Device is gone. */
  84. return -ENODEV;
  85. }
  86. }
  87. EXPORT_SYMBOL(ap_send);
  88. int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
  89. {
  90. struct ap_queue_status status;
  91. if (!msg)
  92. return -EINVAL;
  93. status = ap_dqap(qid, psmid, msg, length, NULL, NULL);
  94. switch (status.response_code) {
  95. case AP_RESPONSE_NORMAL:
  96. return 0;
  97. case AP_RESPONSE_NO_PENDING_REPLY:
  98. if (status.queue_empty)
  99. return -ENOENT;
  100. return -EBUSY;
  101. case AP_RESPONSE_RESET_IN_PROGRESS:
  102. return -EBUSY;
  103. default:
  104. return -ENODEV;
  105. }
  106. }
  107. EXPORT_SYMBOL(ap_recv);
  108. /* State machine definitions and helpers */
  109. static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
  110. {
  111. return AP_SM_WAIT_NONE;
  112. }
/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 * not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Return: the ap_queue_status of the last DQAP invocation. The reply (if
 * any) is delivered to the matching pending message via its receive()
 * callback.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;
	size_t reslen;
	unsigned long resgr0 = 0;
	int parts = 0;

	/*
	 * DQAP loop until response code and resgr0 indicate that
	 * the msg is totally received. As we use the very same buffer
	 * the msg is overwritten with each invocation. That's intended
	 * and the receiver of the msg is informed with a msg rc code
	 * of EMSGSIZE in such a case.
	 */
	do {
		status = ap_dqap(aq->qid, &aq->reply->psmid,
				 aq->reply->msg, aq->reply->bufsize,
				 &reslen, &resgr0);
		parts++;
	} while (status.response_code == 0xFF && resgr0 != 0);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* One reply consumed; keep the count non-negative. */
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			/* Replies still outstanding - re-arm the timeout. */
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		/* Find the pending message matching the reply psmid. */
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			if (parts > 1) {
				/* Multi-part reply overflowed the buffer. */
				ap_msg->rc = -EMSGSIZE;
				ap_msg->receive(aq, ap_msg, NULL);
			} else {
				ap_msg->receive(aq, ap_msg, aq->reply);
			}
			found = true;
			break;
		}
		if (!found) {
			/* Reply with no matching request - discard it. */
			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}
/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	/* Without a reply buffer there is nothing to receive into. */
	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			/* More replies expected - poll again right away. */
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			/* Outstanding replies - wait for irq or timeout. */
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		/* Unexpected response code - take the queue out of service. */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;

	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
	/* Failure injection: force an invalid qid for this NQAP. */
	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
			    __func__, ap_msg->fi.cmd);
		qid = 0xFF00;
	}
#endif
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* One more request in flight; keep the count at least 1. */
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		/* Message is now pending on the hardware queue. */
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		/* Message itself is bad - fail it and keep going. */
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		/* Unexpected response code - take the queue out of service. */
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
  282. /**
  283. * ap_sm_read_write(): Send and receive messages to/from an AP queue.
  284. * @aq: pointer to the AP queue
  285. *
  286. * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
  287. */
  288. static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
  289. {
  290. return min(ap_sm_read(aq), ap_sm_write(aq));
  291. }
/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: The AP queue
 *
 * Submit the Reset command to an AP queue.
 *
 * Returns AP_SM_WAIT_TIMEOUT while the reset runs, AP_SM_WAIT_NONE on
 * error (queue moved to AP_DEV_STATE_ERROR).
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		/* A reset also drops the interrupt registration. */
		aq->interrupt = false;
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* Reset done - try to (re-)enable interrupts. */
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* Reset still running - check again later. */
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or AP_SM_WAIT_NONE.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			/* Replies pending - poll again right away. */
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}
/*
 * AP state machine jump table: maps (state, event) to the action to run.
 * AP_SM_EVENT_POLL drives normal progress, AP_SM_EVENT_TIMEOUT fires when
 * a request timed out (triggering a reset in the working states).
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};
  423. enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
  424. {
  425. if (aq->config && !aq->chkstop &&
  426. aq->dev_state > AP_DEV_STATE_UNINITIATED)
  427. return ap_jumptable[aq->sm_state][event](aq);
  428. else
  429. return AP_SM_WAIT_NONE;
  430. }
  431. enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
  432. {
  433. enum ap_sm_wait wait;
  434. while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
  435. ;
  436. return wait;
  437. }
  438. /*
  439. * AP queue related attributes.
  440. */
  441. static ssize_t request_count_show(struct device *dev,
  442. struct device_attribute *attr,
  443. char *buf)
  444. {
  445. struct ap_queue *aq = to_ap_queue(dev);
  446. bool valid = false;
  447. u64 req_cnt;
  448. spin_lock_bh(&aq->lock);
  449. if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
  450. req_cnt = aq->total_request_count;
  451. valid = true;
  452. }
  453. spin_unlock_bh(&aq->lock);
  454. if (valid)
  455. return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
  456. else
  457. return scnprintf(buf, PAGE_SIZE, "-\n");
  458. }
  459. static ssize_t request_count_store(struct device *dev,
  460. struct device_attribute *attr,
  461. const char *buf, size_t count)
  462. {
  463. struct ap_queue *aq = to_ap_queue(dev);
  464. spin_lock_bh(&aq->lock);
  465. aq->total_request_count = 0;
  466. spin_unlock_bh(&aq->lock);
  467. return count;
  468. }
  469. static DEVICE_ATTR_RW(request_count);
  470. static ssize_t requestq_count_show(struct device *dev,
  471. struct device_attribute *attr, char *buf)
  472. {
  473. struct ap_queue *aq = to_ap_queue(dev);
  474. unsigned int reqq_cnt = 0;
  475. spin_lock_bh(&aq->lock);
  476. if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
  477. reqq_cnt = aq->requestq_count;
  478. spin_unlock_bh(&aq->lock);
  479. return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
  480. }
  481. static DEVICE_ATTR_RO(requestq_count);
  482. static ssize_t pendingq_count_show(struct device *dev,
  483. struct device_attribute *attr, char *buf)
  484. {
  485. struct ap_queue *aq = to_ap_queue(dev);
  486. unsigned int penq_cnt = 0;
  487. spin_lock_bh(&aq->lock);
  488. if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
  489. penq_cnt = aq->pendingq_count;
  490. spin_unlock_bh(&aq->lock);
  491. return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
  492. }
  493. static DEVICE_ATTR_RO(pendingq_count);
  494. static ssize_t reset_show(struct device *dev,
  495. struct device_attribute *attr, char *buf)
  496. {
  497. struct ap_queue *aq = to_ap_queue(dev);
  498. int rc = 0;
  499. spin_lock_bh(&aq->lock);
  500. switch (aq->sm_state) {
  501. case AP_SM_STATE_RESET_START:
  502. case AP_SM_STATE_RESET_WAIT:
  503. rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
  504. break;
  505. case AP_SM_STATE_WORKING:
  506. case AP_SM_STATE_QUEUE_FULL:
  507. rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
  508. break;
  509. default:
  510. rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
  511. }
  512. spin_unlock_bh(&aq->lock);
  513. return rc;
  514. }
  515. static ssize_t reset_store(struct device *dev,
  516. struct device_attribute *attr,
  517. const char *buf, size_t count)
  518. {
  519. struct ap_queue *aq = to_ap_queue(dev);
  520. spin_lock_bh(&aq->lock);
  521. __ap_flush_queue(aq);
  522. aq->sm_state = AP_SM_STATE_RESET_START;
  523. ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
  524. spin_unlock_bh(&aq->lock);
  525. AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
  526. __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
  527. return count;
  528. }
  529. static DEVICE_ATTR_RW(reset);
  530. static ssize_t interrupt_show(struct device *dev,
  531. struct device_attribute *attr, char *buf)
  532. {
  533. struct ap_queue *aq = to_ap_queue(dev);
  534. int rc = 0;
  535. spin_lock_bh(&aq->lock);
  536. if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
  537. rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
  538. else if (aq->interrupt)
  539. rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
  540. else
  541. rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
  542. spin_unlock_bh(&aq->lock);
  543. return rc;
  544. }
  545. static DEVICE_ATTR_RO(interrupt);
  546. static ssize_t config_show(struct device *dev,
  547. struct device_attribute *attr, char *buf)
  548. {
  549. struct ap_queue *aq = to_ap_queue(dev);
  550. int rc;
  551. spin_lock_bh(&aq->lock);
  552. rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
  553. spin_unlock_bh(&aq->lock);
  554. return rc;
  555. }
  556. static DEVICE_ATTR_RO(config);
  557. static ssize_t chkstop_show(struct device *dev,
  558. struct device_attribute *attr, char *buf)
  559. {
  560. struct ap_queue *aq = to_ap_queue(dev);
  561. int rc;
  562. spin_lock_bh(&aq->lock);
  563. rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->chkstop ? 1 : 0);
  564. spin_unlock_bh(&aq->lock);
  565. return rc;
  566. }
  567. static DEVICE_ATTR_RO(chkstop);
#ifdef CONFIG_ZCRYPT_DEBUG
/*
 * sysfs (debug only): show the device state and, if initialized, the
 * state machine state as a "[...]" suffix on the same line.
 */
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		/* no newline - the sm_state suffix below completes the line */
		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [FULL]\n");
			break;
		default:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}
static DEVICE_ATTR_RO(states);
/*
 * sysfs (debug only): show the last AP response code that caused an error,
 * as a symbolic name where known.
 */
static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	/* Snapshot under the lock, format afterwards. */
	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif
/* sysfs attributes attached to every AP queue device. */
static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
	&dev_attr_chkstop.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

/* Device type for AP queue devices; groups are created by the driver core. */
static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};
  694. static void ap_queue_device_release(struct device *dev)
  695. {
  696. struct ap_queue *aq = to_ap_queue(dev);
  697. spin_lock_bh(&ap_queues_lock);
  698. hash_del(&aq->hnode);
  699. spin_unlock_bh(&ap_queues_lock);
  700. kfree(aq);
  701. }
  702. struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
  703. {
  704. struct ap_queue *aq;
  705. aq = kzalloc(sizeof(*aq), GFP_KERNEL);
  706. if (!aq)
  707. return NULL;
  708. aq->ap_dev.device.release = ap_queue_device_release;
  709. aq->ap_dev.device.type = &ap_queue_type;
  710. aq->ap_dev.device_type = device_type;
  711. aq->qid = qid;
  712. aq->interrupt = false;
  713. spin_lock_init(&aq->lock);
  714. INIT_LIST_HEAD(&aq->pendingq);
  715. INIT_LIST_HEAD(&aq->requestq);
  716. timer_setup(&aq->timeout, ap_request_timeout, 0);
  717. return aq;
  718. }
  719. void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
  720. {
  721. aq->reply = reply;
  722. spin_lock_bh(&aq->lock);
  723. ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
  724. spin_unlock_bh(&aq->lock);
  725. }
  726. EXPORT_SYMBOL(ap_queue_init_reply);
/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Return: 0 if the message was queued, -ENODEV if the queue device is
 * not in the OPERATING state. In both cases the state machine is run
 * to make as much progress as possible.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else {
		rc = -ENODEV;
	}

	/* Send/receive as many request from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);
/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		/*
		 * The message is on exactly one of the two lists: if its
		 * psmid is found on pendingq, decrement pendingq_count;
		 * otherwise it must be on requestq.
		 */
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 * Each flushed message is completed with rc -EAGAIN via its receive()
 * callback. Caller must hold aq->lock.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	/* Fail all messages already sent to the hardware queue. */
	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	/* Fail all messages not yet sent. */
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}
  803. void ap_flush_queue(struct ap_queue *aq)
  804. {
  805. spin_lock_bh(&aq->lock);
  806. __ap_flush_queue(aq);
  807. spin_unlock_bh(&aq->lock);
  808. }
  809. EXPORT_SYMBOL(ap_flush_queue);
/*
 * Prepare an AP queue for removal: flush all queued messages, move the
 * device state to SHUTDOWN and make sure the request timeout timer is
 * no longer running.
 */
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	/* timer must not fire after this point */
	del_timer_sync(&aq->timeout);
}
/*
 * Final removal step: zero-reset the queue and drop back to the
 * UNINITIATED device state. Expects ap_queue_prepare_remove() to have
 * run first.
 */
void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}
  833. void ap_queue_init_state(struct ap_queue *aq)
  834. {
  835. spin_lock_bh(&aq->lock);
  836. aq->dev_state = AP_DEV_STATE_OPERATING;
  837. aq->sm_state = AP_SM_STATE_RESET_START;
  838. aq->last_err_rc = 0;
  839. ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
  840. spin_unlock_bh(&aq->lock);
  841. }
  842. EXPORT_SYMBOL(ap_queue_init_state);