// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP bus layer messages handling
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include "ishtp-dev.h"
#include "hbm.h"
#include "client.h"

/**
 * ishtp_hbm_fw_cl_allocate() - Allocate FW clients
 * @dev: ISHTP device instance
 *
 * Allocates storage for fw clients
 */
static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
{
	struct ishtp_fw_client *clients;
	int b;

	/* count how many ISH clients we have */
	for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
		dev->fw_clients_num++;

	if (dev->fw_clients_num <= 0)
		return;

	/* allocate storage for fw clients representation */
	clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
			  GFP_KERNEL);
	if (!clients) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		ish_hw_reset(dev);
		return;
	}
	dev->fw_clients = clients;
}

/**
 * ishtp_hbm_cl_hdr() - construct client hbm header
 * @cl: client
 * @hbm_cmd: host bus message command
 * @buf: buffer for cl header
 * @len: buffer length
 *
 * Initialize HBM buffer
 */
static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
	void *buf, size_t len)
{
	struct ishtp_hbm_cl_cmd *cmd = buf;

	memset(cmd, 0, len);

	cmd->hbm_cmd = hbm_cmd;
	cmd->host_addr = cl->host_client_id;
	cmd->fw_addr = cl->fw_client_id;
}

/**
 * ishtp_hbm_cl_addr_equal() - Compare client address
 * @cl: client
 * @buf: Client command buffer
 *
 * Compare client address with the address in command buffer
 *
 * Return: True if they have the same address
 */
static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
{
	struct ishtp_hbm_cl_cmd *cmd = buf;

	return cl->host_client_id == cmd->host_addr &&
		cl->fw_client_id == cmd->fw_addr;
}

/**
 * ishtp_hbm_start_wait() - Wait for HBM start message
 * @dev: ISHTP device instance
 *
 * Wait for HBM start message from firmware
 *
 * Return: 0 if HBM start is/was received else timeout error
 */
int ishtp_hbm_start_wait(struct ishtp_device *dev)
{
	int ret;

	if (dev->hbm_state > ISHTP_HBM_START)
		return 0;

	dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
		dev->hbm_state);

	ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
					dev->hbm_state >= ISHTP_HBM_STARTED,
					(ISHTP_INTEROP_TIMEOUT * HZ));

	dev_dbg(dev->devc,
		"Woke up from waiting for ishtp start. hbm_state=%08X\n",
		dev->hbm_state);
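
	/*
	 * wait_event_interruptible_timeout() returns 0 on timeout and a
	 * negative value when interrupted by a signal; in both cases the
	 * start handshake is considered failed if HBM has not advanced.
	 */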
	if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
		dev->hbm_state = ISHTP_HBM_IDLE;
		dev_err(dev->devc,
			"waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
			ret, dev->hbm_state);
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * ishtp_hbm_start_req() - Send HBM start message
 * @dev: ISHTP device instance
 *
 * Send HBM start message to firmware
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_start_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_host_version_request start_req = { 0 };

	ishtp_hbm_hdr(&hdr, sizeof(start_req));

	/* host start message */
	start_req.hbm_cmd = HOST_START_REQ_CMD;
	start_req.host_version.major_version = HBM_MAJOR_VERSION;
	start_req.host_version.minor_version = HBM_MINOR_VERSION;

	/*
	 * (!) Response to HBM start may be so quick that this thread would get
	 * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
	 * So set it at first, change back to ISHTP_HBM_IDLE upon failure
	 */
	dev->hbm_state = ISHTP_HBM_START;
	if (ishtp_write_message(dev, &hdr, &start_req)) {
		dev_err(dev->devc, "version message send failed\n");
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev->hbm_state = ISHTP_HBM_IDLE;
		ish_hw_reset(dev);
		return -ENODEV;
	}

	return 0;
}

/**
 * ishtp_hbm_enum_clients_req() - Send client enum req
 * @dev: ISHTP device instance
 *
 * Send enumeration client request message
 */
void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_host_enum_request enum_req = { 0 };

	/* enumerate clients */
	ishtp_hbm_hdr(&hdr, sizeof(enum_req));
	enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;

	if (ishtp_write_message(dev, &hdr, &enum_req)) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev_err(dev->devc, "enumeration request send failed\n");
		ish_hw_reset(dev);
	}
	dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
}

/**
 * ishtp_hbm_prop_req() - Request property
 * @dev: ISHTP device instance
 *
 * Request property for a single client
 *
 * Return: 0 if success else error code
 */
static int ishtp_hbm_prop_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_props_request prop_req = { 0 };
	unsigned long next_client_index;
	uint8_t client_num;

	client_num = dev->fw_client_presentation_num;

	next_client_index = find_next_bit(dev->fw_clients_map,
		ISHTP_CLIENTS_MAX, dev->fw_client_index);

	/* We got all client properties */
	if (next_client_index == ISHTP_CLIENTS_MAX) {
		dev->hbm_state = ISHTP_HBM_WORKING;
		dev->dev_state = ISHTP_DEV_ENABLED;

		for (dev->fw_client_presentation_num = 1;
			dev->fw_client_presentation_num < client_num + 1;
				++dev->fw_client_presentation_num)
			/* Add new client device */
			ishtp_bus_new_client(dev);
		return 0;
	}

	dev->fw_clients[client_num].client_id = next_client_index;

	ishtp_hbm_hdr(&hdr, sizeof(prop_req));

	prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
	prop_req.address = next_client_index;

	if (ishtp_write_message(dev, &hdr, &prop_req)) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev_err(dev->devc, "properties request send failed\n");
		ish_hw_reset(dev);
		return -EIO;
	}

	dev->fw_client_index = next_client_index;

	return 0;
}

/**
 * ishtp_hbm_stop_req() - Send HBM stop
 * @dev: ISHTP device instance
 *
 * Send stop request message
 */
static void ishtp_hbm_stop_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_host_stop_request stop_req = { 0 };

	ishtp_hbm_hdr(&hdr, sizeof(stop_req));

	stop_req.hbm_cmd = HOST_STOP_REQ_CMD;
	stop_req.reason = DRIVER_STOP_REQUEST;

	ishtp_write_message(dev, &hdr, &stop_req);
}

/**
 * ishtp_hbm_cl_flow_control_req() - Send flow control request
 * @dev: ISHTP device instance
 * @cl: ISHTP client instance
 *
 * Send flow control request
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_flow_control flow_ctrl;
	const size_t len = sizeof(flow_ctrl);
	int rv;
	unsigned long flags;

	spin_lock_irqsave(&cl->fc_spinlock, flags);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, &flow_ctrl, len);

	/*
	 * Sync possible race when RB recycle and packet receive paths
	 * both try to send an out FC
	 */
	if (cl->out_flow_ctrl_creds) {
		spin_unlock_irqrestore(&cl->fc_spinlock, flags);
		return 0;
	}

	cl->recv_msg_num_frags = 0;

	rv = ishtp_write_message(dev, &hdr, &flow_ctrl);
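	/*
	 * On a successful send, record the outstanding credit and track the
	 * worst-case delay between receiving a message and returning an FC.
	 */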
	if (!rv) {
		++cl->out_flow_ctrl_creds;
		++cl->out_flow_ctrl_cnt;
		cl->ts_out_fc = ktime_get();
		if (cl->ts_rx) {
			ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);

			if (ktime_after(ts_diff, cl->ts_max_fc_delay))
				cl->ts_max_fc_delay = ts_diff;
		}
	} else {
		++cl->err_send_fc;
	}

	spin_unlock_irqrestore(&cl->fc_spinlock, flags);

	return rv;
}

/**
 * ishtp_hbm_cl_disconnect_req() - Send disconnect request
 * @dev: ISHTP device instance
 * @cl: ISHTP client instance
 *
 * Send disconnect message to fw
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_client_connect_request disconn_req;
	const size_t len = sizeof(disconn_req);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, &disconn_req, len);

	return ishtp_write_message(dev, &hdr, &disconn_req);
}

/**
 * ishtp_hbm_cl_disconnect_res() - Get disconnect response
 * @dev: ISHTP device instance
 * @rs: Response message
 *
 * Received disconnect response from fw
 */
static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
	struct hbm_client_connect_response *rs)
{
	struct ishtp_cl *cl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
			cl->state = ISHTP_CL_DISCONNECTED;
			wake_up_interruptible(&cl->wait_ctrl_res);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}

/**
 * ishtp_hbm_cl_connect_req() - Send connect request
 * @dev: ISHTP device instance
 * @cl: client device instance
 *
 * Send connection request to specific fw client
 *
 * Return: 0 if success else error code
 */
int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_client_connect_request conn_req;
	const size_t len = sizeof(conn_req);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, &conn_req, len);

	return ishtp_write_message(dev, &hdr, &conn_req);
}

/**
 * ishtp_hbm_cl_connect_res() - Get connect response
 * @dev: ISHTP device instance
 * @rs: Response message
 *
 * Received connect response from fw
 */
static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
	struct hbm_client_connect_response *rs)
{
	struct ishtp_cl *cl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (ishtp_hbm_cl_addr_equal(cl, rs)) {
			if (!rs->status) {
				cl->state = ISHTP_CL_CONNECTED;
				cl->status = 0;
			} else {
				cl->state = ISHTP_CL_DISCONNECTED;
				cl->status = -ENODEV;
			}
			wake_up_interruptible(&cl->wait_ctrl_res);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}

/**
 * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
 * @dev: ISHTP device instance
 * @disconnect_req: disconnect request structure
 *
 * Disconnect request bus message from the fw. Send disconnect response.
 */
static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
	struct hbm_client_connect_request *disconnect_req)
{
	struct ishtp_cl *cl;
	const size_t len = sizeof(struct hbm_client_connect_response);
	unsigned long flags;
	struct ishtp_msg_hdr hdr;
	unsigned char data[4];	/* All HBM messages are 4 bytes */

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
			cl->state = ISHTP_CL_DISCONNECTED;

			/* send disconnect response */
			ishtp_hbm_hdr(&hdr, len);
			ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
				len);
			ishtp_write_message(dev, &hdr, data);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}

/**
 * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
 * @dev: ISHTP device instance
 * @dma_xfer: HBM transfer message
 *
 * Receive ack for ISHTP-over-DMA client message
 */
static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
				   struct dma_xfer_hbm *dma_xfer)
{
	void *msg;
	uint64_t offs;
	struct ishtp_msg_hdr *ishtp_hdr =
		(struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
	unsigned int msg_offs;
	struct ishtp_cl *cl;
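
	/*
	 * One incoming HBM may pack several dma_xfer_hbm acks back to back;
	 * walk them using the total length from the IPC header.
	 */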
	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
			msg_offs += sizeof(struct dma_xfer_hbm)) {
		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
		if (offs > dev->ishtp_host_dma_tx_buf_size) {
			dev_err(dev->devc, "Bad DMA Tx ack message address\n");
			return;
		}
		if (dma_xfer->msg_length >
				dev->ishtp_host_dma_tx_buf_size - offs) {
			dev_err(dev->devc, "Bad DMA Tx ack message size\n");
			return;
		}

		/* logical address of the acked mem */
		msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
		ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);

		list_for_each_entry(cl, &dev->cl_list, link) {
			if (cl->fw_client_id == dma_xfer->fw_client_id &&
			    cl->host_client_id == dma_xfer->host_client_id)
				/*
				 * in case that a single ack may be sent
				 * over several dma transfers, and the last msg
				 * addr was inside the acked memory, but not in
				 * its start
				 */
				if (cl->last_dma_addr >=
						(unsigned char *)msg &&
						cl->last_dma_addr <
						(unsigned char *)msg +
						dma_xfer->msg_length) {
					cl->last_dma_acked = 1;

					if (!list_empty(&cl->tx_list.list) &&
						cl->ishtp_flow_ctrl_creds) {
						/*
						 * start sending the first msg
						 */
						ishtp_cl_send_msg(dev, cl);
					}
				}
		}
		++dma_xfer;
	}
}

/**
 * ishtp_hbm_dma_xfer() - Receive DMA transfer message
 * @dev: ISHTP device instance
 * @dma_xfer: HBM transfer message
 *
 * Receive ISHTP-over-DMA client message
 */
static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
			       struct dma_xfer_hbm *dma_xfer)
{
	void *msg;
	uint64_t offs;
	struct ishtp_msg_hdr hdr;
	struct ishtp_msg_hdr *ishtp_hdr =
		(struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
	struct dma_xfer_hbm *prm = dma_xfer;
	unsigned int msg_offs;
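
	/*
	 * A single HBM may carry several dma_xfer_hbm descriptors; each one
	 * is validated against the Rx DMA buffer before the payload is
	 * handed to the client layer, then reused in place for the ack.
	 */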
	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
			msg_offs += sizeof(struct dma_xfer_hbm)) {
		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
		if (offs > dev->ishtp_host_dma_rx_buf_size) {
			dev_err(dev->devc, "Bad DMA Rx message address\n");
			return;
		}
		if (dma_xfer->msg_length >
				dev->ishtp_host_dma_rx_buf_size - offs) {
			dev_err(dev->devc, "Bad DMA Rx message size\n");
			return;
		}
		msg = dev->ishtp_host_dma_rx_buf + offs;
		recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
		dma_xfer->hbm = DMA_XFER_ACK;	/* Prepare for response */
		++dma_xfer;
	}

	/* Send DMA_XFER_ACK [...] */
	ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
	ishtp_write_message(dev, &hdr, (unsigned char *)prm);
}

/**
 * ishtp_hbm_dispatch() - HBM dispatch function
 * @dev: ISHTP device instance
 * @hdr: bus message
 *
 * Bottom half read routine after ISR to handle the read bus message cmd
 * processing
 */
void ishtp_hbm_dispatch(struct ishtp_device *dev,
			struct ishtp_bus_message *hdr)
{
	struct ishtp_bus_message *ishtp_msg;
	struct ishtp_fw_client *fw_client;
	struct hbm_host_version_response *version_res;
	struct hbm_client_connect_response *connect_res;
	struct hbm_client_connect_response *disconnect_res;
	struct hbm_client_connect_request *disconnect_req;
	struct hbm_props_response *props_res;
	struct hbm_host_enum_response *enum_res;
	struct ishtp_msg_hdr ishtp_hdr;
	struct dma_alloc_notify dma_alloc_notify;
	struct dma_xfer_hbm *dma_xfer;

	ishtp_msg = hdr;

	switch (ishtp_msg->hbm_cmd) {
	case HOST_START_RES_CMD:
		version_res = (struct hbm_host_version_response *)ishtp_msg;
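
		/*
		 * If the firmware rejects the host HBM version, remember the
		 * firmware's maximum supported version and stop the bus.
		 */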
		if (!version_res->host_version_supported) {
			dev->version = version_res->fw_max_version;

			dev->hbm_state = ISHTP_HBM_STOPPED;
			ishtp_hbm_stop_req(dev);
			return;
		}

		dev->version.major_version = HBM_MAJOR_VERSION;
		dev->version.minor_version = HBM_MINOR_VERSION;
		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
				dev->hbm_state == ISHTP_HBM_START) {
			dev->hbm_state = ISHTP_HBM_STARTED;
			ishtp_hbm_enum_clients_req(dev);
		} else {
			dev_err(dev->devc,
				"reset: wrong host start response\n");
			/* BUG: why do we arrive here? */
			ish_hw_reset(dev);
			return;
		}

		wake_up_interruptible(&dev->wait_hbm_recvd_msg);
		break;

	case CLIENT_CONNECT_RES_CMD:
		connect_res = (struct hbm_client_connect_response *)ishtp_msg;
		ishtp_hbm_cl_connect_res(dev, connect_res);
		break;

	case CLIENT_DISCONNECT_RES_CMD:
		disconnect_res =
			(struct hbm_client_connect_response *)ishtp_msg;
		ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
		break;

	case HOST_CLIENT_PROPERTIES_RES_CMD:
		props_res = (struct hbm_props_response *)ishtp_msg;
		fw_client = &dev->fw_clients[dev->fw_client_presentation_num];

		if (props_res->status || !dev->fw_clients) {
			dev_err(dev->devc,
				"reset: properties response hbm wrong status\n");
			ish_hw_reset(dev);
			return;
		}

		if (fw_client->client_id != props_res->address) {
			dev_err(dev->devc,
				"reset: host properties response address mismatch [%02X %02X]\n",
				fw_client->client_id, props_res->address);
			ish_hw_reset(dev);
			return;
		}

		if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
			dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
			dev_err(dev->devc,
				"reset: unexpected properties response\n");
			ish_hw_reset(dev);
			return;
		}

		fw_client->props = props_res->client_properties;
		dev->fw_client_index++;
		dev->fw_client_presentation_num++;

		/* request property for the next client */
		ishtp_hbm_prop_req(dev);

		if (dev->dev_state != ISHTP_DEV_ENABLED)
			break;

		if (!ishtp_use_dma_transfer())
			break;

		dev_dbg(dev->devc, "Requesting to use DMA\n");
		ishtp_cl_alloc_dma_buf(dev);
		if (dev->ishtp_host_dma_rx_buf) {
			const size_t len = sizeof(dma_alloc_notify);

			memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
			dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
			dma_alloc_notify.buf_size =
				dev->ishtp_host_dma_rx_buf_size;
			dma_alloc_notify.buf_address =
				dev->ishtp_host_dma_rx_buf_phys;
			ishtp_hbm_hdr(&ishtp_hdr, len);
			ishtp_write_message(dev, &ishtp_hdr,
				(unsigned char *)&dma_alloc_notify);
		}

		break;

	case HOST_ENUM_RES_CMD:
		enum_res = (struct hbm_host_enum_response *)ishtp_msg;
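		/*
		 * valid_addresses is the enumerated fw clients bitmap; the
		 * 32 copied bytes cover the ISHTP_CLIENTS_MAX client addresses.
		 */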
		memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
			dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
			dev->fw_client_presentation_num = 0;
			dev->fw_client_index = 0;

			ishtp_hbm_fw_cl_allocate(dev);
			dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;

			/* first property request */
			ishtp_hbm_prop_req(dev);
		} else {
			dev_err(dev->devc,
				"reset: unexpected enumeration response hbm\n");
			ish_hw_reset(dev);
			return;
		}
		break;

	case HOST_STOP_RES_CMD:
		if (dev->hbm_state != ISHTP_HBM_STOPPED)
			dev_err(dev->devc, "unexpected stop response\n");

		dev->dev_state = ISHTP_DEV_DISABLED;
		dev_info(dev->devc, "reset: FW stop response\n");
		ish_hw_reset(dev);
		break;

	case CLIENT_DISCONNECT_REQ_CMD:
		/* search for client */
		disconnect_req =
			(struct hbm_client_connect_request *)ishtp_msg;
		ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
		break;

	case FW_STOP_REQ_CMD:
		dev->hbm_state = ISHTP_HBM_STOPPED;
		break;

	case DMA_BUFFER_ALLOC_RESPONSE:
		dev->ishtp_host_dma_enabled = 1;
		break;

	case DMA_XFER:
		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
		if (!dev->ishtp_host_dma_enabled) {
			dev_err(dev->devc,
				"DMA XFER requested but DMA is not enabled\n");
			break;
		}
		ishtp_hbm_dma_xfer(dev, dma_xfer);
		break;

	case DMA_XFER_ACK:
		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
		if (!dev->ishtp_host_dma_enabled ||
			!dev->ishtp_host_dma_tx_buf) {
			dev_err(dev->devc,
				"DMA XFER acked but DMA Tx is not enabled\n");
			break;
		}
		ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
		break;

	default:
		dev_err(dev->devc, "unknown HBM: %u\n",
			(unsigned int)ishtp_msg->hbm_cmd);
		break;
	}
}

/**
 * bh_hbm_work_fn() - HBM work function
 * @work: work struct
 *
 * Bottom half processing work function (instead of thread handler)
 * for processing hbm messages
 */
void bh_hbm_work_fn(struct work_struct *work)
{
	unsigned long flags;
	struct ishtp_device *dev;
	unsigned char hbm[IPC_PAYLOAD_SIZE];

	dev = container_of(work, struct ishtp_device, bh_hbm_work);

	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
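	/*
	 * The ISR queues HBMs into a ring of IPC_PAYLOAD_SIZE slots; a head
	 * that differs from the tail means at least one message is pending.
	 */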
	if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
		memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
			IPC_PAYLOAD_SIZE);
		dev->rd_msg_fifo_head =
			(dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
		ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
	} else {
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
	}
}

/**
 * recv_hbm() - Receive HBM message
 * @dev: ISHTP device instance
 * @ishtp_hdr: received bus message
 *
 * Receive and process ISHTP bus messages in ISR context. This will schedule
 * work function to process message
 */
void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
{
	uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
	struct ishtp_bus_message *ishtp_msg =
		(struct ishtp_bus_message *)rd_msg_buf;
	unsigned long flags;

	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);

	/* Flow control - handle in place */
	if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
		struct hbm_flow_control *flow_control =
			(struct hbm_flow_control *)ishtp_msg;
		struct ishtp_cl *cl = NULL;
		unsigned long flags, tx_flags;

		spin_lock_irqsave(&dev->cl_list_lock, flags);
		list_for_each_entry(cl, &dev->cl_list, link) {
			if (cl->host_client_id == flow_control->host_addr &&
					cl->fw_client_id ==
					flow_control->fw_addr) {
				/*
				 * NOTE: It's valid only for counting
				 * flow-control implementation to receive a
				 * FC in the middle of sending. Meanwhile not
				 * supported
				 */
				if (cl->ishtp_flow_ctrl_creds)
					dev_err(dev->devc,
						"recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
						(unsigned int)cl->fw_client_id,
						(unsigned int)cl->host_client_id,
						cl->ishtp_flow_ctrl_creds);
				else {
					++cl->ishtp_flow_ctrl_creds;
					++cl->ishtp_flow_ctrl_cnt;
					cl->last_ipc_acked = 1;
					spin_lock_irqsave(
							&cl->tx_list_spinlock,
							tx_flags);
					if (!list_empty(&cl->tx_list.list)) {
						/*
						 * start sending the first msg
						 * = the callback function
						 */
						spin_unlock_irqrestore(
							&cl->tx_list_spinlock,
							tx_flags);
						ishtp_cl_send_msg(dev, cl);
					} else {
						spin_unlock_irqrestore(
							&cl->tx_list_spinlock,
							tx_flags);
					}
				}
				break;
			}
		}
		spin_unlock_irqrestore(&dev->cl_list_lock, flags);
		goto eoi;
	}

	/*
	 * Some messages that are safe for ISR processing and important
	 * to be done "quickly" and in-order, go here
	 */
	if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
			ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
			ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
			ishtp_msg->hbm_cmd == DMA_XFER) {
		ishtp_hbm_dispatch(dev, ishtp_msg);
		goto eoi;
	}

	/*
	 * All other HBMs go here.
	 * We schedule HBMs for processing serially by using system wq,
	 * possibly there will be multiple HBMs scheduled at the same time.
	 */
	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
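	/*
	 * The ring is full when advancing the tail by one slot would make it
	 * catch up with the head; in that case the HBM is dropped.
	 */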
	if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
			dev->rd_msg_fifo_head) {
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
		dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
			(unsigned int)ishtp_msg->hbm_cmd);
		goto eoi;
	}
	memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
		ishtp_hdr->length);
	dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
		(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
	spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
	schedule_work(&dev->bh_hbm_work);
eoi:
	return;
}

/**
 * recv_fixed_cl_msg() - Receive fixed client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: received bus message
 *
 * Receive and process ISHTP fixed client messages (address == 0)
 * in ISR context
 */
void recv_fixed_cl_msg(struct ishtp_device *dev,
	struct ishtp_msg_hdr *ishtp_hdr)
{
	uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

	dev->print_log(dev,
		"%s() got fixed client msg from client #%d\n",
		__func__, ishtp_hdr->fw_addr);
	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
	if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
		struct ish_system_states_header *msg_hdr =
			(struct ish_system_states_header *)rd_msg_buf;

		if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
			ishtp_send_resume(dev);
		/* if FW request arrived here, the system is not suspended */
		else
			dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
				msg_hdr->cmd);
	}
}

/**
 * fix_cl_hdr() - Initialize fixed client header
 * @hdr: message header
 * @length: length of message
 * @cl_addr: Client address
 *
 * Initialize message header for fixed client
 */
static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
	uint8_t cl_addr)
{
	hdr->host_addr = 0;
	hdr->fw_addr = cl_addr;
	hdr->length = length;
	hdr->msg_complete = 1;
	hdr->reserved = 0;
}

/*** Suspend and resume notification ***/
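
/*
 * Last power state reported to the firmware: suspend sets both state bits,
 * resume clears them again.
 */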
static uint32_t current_state;
static uint32_t supported_states = SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT;

/**
 * ishtp_send_suspend() - Send suspend message to FW
 * @dev: ISHTP device instance
 *
 * Send suspend message to FW. This is useful for system freeze (non S3) case
 */
void ishtp_send_suspend(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr ishtp_hdr;
	struct ish_system_states_status state_status_msg;
	const size_t len = sizeof(struct ish_system_states_status);

	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);

	memset(&state_status_msg, 0, len);
	state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
	state_status_msg.supported_states = supported_states;
	current_state |= (SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT);
	dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
	state_status_msg.states_status = current_state;

	ishtp_write_message(dev, &ishtp_hdr,
		(unsigned char *)&state_status_msg);
}
EXPORT_SYMBOL(ishtp_send_suspend);

/**
 * ishtp_send_resume() - Send resume message to FW
 * @dev: ISHTP device instance
 *
 * Send resume message to FW. This is useful for system freeze (non S3) case
 */
void ishtp_send_resume(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr ishtp_hdr;
	struct ish_system_states_status state_status_msg;
	const size_t len = sizeof(struct ish_system_states_status);

	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);

	memset(&state_status_msg, 0, len);
	state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
	state_status_msg.supported_states = supported_states;
	current_state &= ~(CONNECTED_STANDBY_STATE_BIT | SUSPEND_STATE_BIT);
	dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
	state_status_msg.states_status = current_state;

	ishtp_write_message(dev, &ishtp_hdr,
		(unsigned char *)&state_status_msg);
}
EXPORT_SYMBOL(ishtp_send_resume);

/**
 * ishtp_query_subscribers() - Send query subscribers message
 * @dev: ISHTP device instance
 *
 * Send message to query subscribers
 */
void ishtp_query_subscribers(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr ishtp_hdr;
	struct ish_system_states_query_subscribers query_subscribers_msg;
	const size_t len = sizeof(struct ish_system_states_query_subscribers);

	fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);

	memset(&query_subscribers_msg, 0, len);
	query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;

	ishtp_write_message(dev, &ishtp_hdr,
		(unsigned char *)&query_subscribers_msg);
}