// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include "hbm.h"
#include "client.h"

int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
	unsigned long tx_free_flags;
	int size;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

	return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);

int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
	return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from the read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client device
 * needs reset due to error, S3 resume or during module removal.
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes the client device fields: init spinlocks, init queues etc.
 * This function is called during new client creation
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
	cl->tx_ring_free_size = cl->tx_ring_size;

	/* dma */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - allocates client structure and sets it up.
 * @cl_device: ishtp client device
 *
 * Allocate memory for a new client device and initialize each field.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, cl_device->ishtp_dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);

/**
 * ishtp_cl_free() - Frees a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 *
 * This allocates a single bit in the hostmap. This function will make sure
 * that not too many client sessions are opened at the same time. Once
 * allocated, the client device instance is added to the ishtp device's
 * current client list.
 *
 * Return: 0 or error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int id, ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);

/**
 * ishtp_cl_unlink() - remove fw_cl from the client device list
 * @cl: client device instance
 *
 * Remove a previously linked client from the ishtp device's client list
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into the device's
	 * structure, before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 if successful disconnect response from the firmware or error
 * code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			 cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If an FW reset arrived, this will happen. Don't check cl's fields,
	 * as 'cl' may already be freed
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);

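/*
 * Illustrative teardown sketch (not part of this driver): callers move the
 * client to ISHTP_CL_DISCONNECTING before calling ishtp_cl_disconnect(),
 * then unlink, flush and free. The function name below is hypothetical;
 * the call order mirrors in-tree ISHTP bus clients such as ishtp-hid.
 */
#if 0
static void example_client_teardown(struct ishtp_cl *cl)
{
	ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
	ishtp_cl_disconnect(cl);
	ishtp_cl_unlink(cl);
	ishtp_cl_flush_queues(cl);
	ishtp_cl_free(cl);
}
#endif
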
/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is connecting
 *
 * Return: true if another client is connecting, else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful, it will
 * also allocate the RX and TX ring buffers.
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If an FW reset arrived, this will happen. Don't check cl's fields,
	 * as 'cl' may already be freed
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if allocation failed, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if allocation failed, disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, emit flow-control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);

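/*
 * Illustrative bring-up sketch (not part of this driver): the usual call
 * sequence for a bus client is allocate -> link -> resolve the fw client
 * by its protocol GUID -> set fw client id and state -> connect. The
 * function name and 'my_uuid' are hypothetical; ishtp_fw_cl_get_client()
 * and ishtp_get_fw_client_id() are assumed to be the in-tree client API
 * helpers, as used by clients such as ishtp-hid.
 */
#if 0
static int example_client_bringup(struct ishtp_cl_device *cl_device,
				  const guid_t *my_uuid,
				  struct ishtp_cl **out_cl)
{
	struct ishtp_fw_client *fw_client;
	struct ishtp_cl *cl;
	int rv;

	cl = ishtp_cl_allocate(cl_device);
	if (!cl)
		return -ENOMEM;

	rv = ishtp_cl_link(cl);
	if (rv)
		goto out_free;

	fw_client = ishtp_fw_cl_get_client(ishtp_get_ishtp_device(cl),
					   my_uuid);
	if (!fw_client) {
		rv = -ENOENT;
		goto out_unlink;
	}

	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

	/* Also allocates the RX/TX rings and emits the first flow control */
	rv = ishtp_cl_connect(cl);
	if (rv)
		goto out_unlink;

	*out_cl = cl;
	return 0;

out_unlink:
	ishtp_cl_unlink(cl);
out_free:
	ishtp_cl_free(cl);
	return rv;
}
#endif
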
/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers, add it to the
 * device's read list and send a flow control request to the firmware so
 * that it may send the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}

/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * gets a buffer from the tx ring buffers, copies the message data into it
 * and sends the message using ishtp_cl_send_msg().
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free bufs */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
				  struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		return -EIO;
		/* Should not happen, as free list is pre-allocated */
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	--cl->tx_ring_free_size;

	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);

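/*
 * Illustrative sender sketch (not part of this driver): since
 * ishtp_cl_send() returns -ENOMEM when the Tx free list is exhausted, a
 * client may poll ishtp_cl_get_tx_free_buffer_size() before queuing a
 * large payload, similar to what the ishtp-hid client does. The function
 * name and 'payload' are hypothetical.
 */
#if 0
static int example_send_when_room(struct ishtp_cl *cl,
				  uint8_t *payload, size_t len)
{
	/* Crude backoff; a waitqueue woken from the Tx path would be better */
	while (ishtp_cl_get_tx_free_buffer_size(cl) < len)
		msleep(10);

	return ishtp_cl_send(cl, payload, len);
}
#endif
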
/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received call ishtp_cl_bus_rx_event()
 * to process message
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * if in-process list is empty, then need to schedule
	 * the processing thread
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_send() - IPC tx send function
 * @prm: Pointer to client device instance
 *
 * Send message over IPC. The message will be split into fragments
 * if the message size is bigger than the IPC FIFO size, and all
 * fragments will be sent one by one.
 */
static void ipc_tx_send(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Other conditions if some critical error has
	 * occurred before this callback is called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	while (rem > 0) {
		ishtp_hdr.host_addr = cl->host_client_id;
		ishtp_hdr.fw_addr = cl->fw_client_id;
		ishtp_hdr.reserved = 0;
		pmsg = cl_msg->send_buf.data + cl->tx_offs;

		if (rem <= dev->mtu) {
			/* Last fragment or only one packet */
			ishtp_hdr.length = rem;
			ishtp_hdr.msg_complete = 1;
			/* Submit to IPC queue with no callback */
			ishtp_write_message(dev, &ishtp_hdr, pmsg);
			cl->tx_offs = 0;
			cl->sending = 0;

			break;
		} else {
			/* Send ipc fragment */
			ishtp_hdr.length = dev->mtu;
			ishtp_hdr.msg_complete = 0;
			/* All fragments submitted to IPC queue with no callback */
			ishtp_write_message(dev, &ishtp_hdr, pmsg);
			cl->tx_offs += dev->mtu;
			rem = cl_msg->send_buf.size - cl->tx_offs;
		}
	}

	list_del_init(&cl_msg->list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			       tx_free_flags);
}

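/*
 * Worked example (illustrative numbers, not from the source): with
 * dev->mtu == 128 and a queued message of send_buf.size == 300,
 * ipc_tx_send() above emits fragments of 128 + 128 + 44 bytes; only the
 * last header has msg_complete == 1, after which tx_offs is reset to 0
 * and cl->sending is cleared.
 */
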
/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message over IPC, not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_send(cl);
	++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/*
	 * If the current fw doesn't support cache snooping, the driver
	 * has to flush the cache manually.
	 */
	if (dev->ops->dma_no_cache_snooping &&
	    dev->ops->dma_no_cache_snooping(dev))
		clflush_cache_range(msg_addr, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}

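/*
 * Worked example (illustrative numbers, not from the source): if
 * ishtp_cl_get_dma_send_buf() returns a slot 0x100 bytes into
 * ishtp_host_dma_tx_buf, then off == 0x100 and the firmware is told to
 * fetch the message from ishtp_host_dma_tx_buf_phys + 0x100, the physical
 * address of the just-copied payload.
 */
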
/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA or IPC based on transfer_path
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}

/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * or work queue context
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;
	int rb_count;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, flags);
	rb_count = -1;
	list_for_each_entry(rb, &dev->read_list.list, list) {
		++rb_count;
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client
		 * msg size), drop the message and return the buffer to the
		 * free pool.
		 * Do we need to disconnect such a client? (We don't send
		 * back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * the whole msg arrived, send a new FC, and add a new
			 * rb buffer for the next coming msg
			 */
			spin_lock(&cl->free_list_spinlock);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock(&cl->free_list_spinlock);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					      &dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock(&cl->free_list_spinlock);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}

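/*
 * Illustrative consumer sketch (not part of this driver): a bus client's
 * rx-event callback typically drains its in-process list and recycles each
 * buffer, which re-arms flow control via ishtp_cl_io_rb_recycle(). The
 * callback and process_payload() are hypothetical; ishtp_get_drv_data()
 * and ishtp_cl_rx_get_rb() are assumed to be the in-tree client helpers.
 */
#if 0
static void example_rx_event_cb(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_get_drv_data(cl_device);
	struct ishtp_cl_rb *rb;

	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
		process_payload(rb->buffer.data, rb->buf_idx);
		ishtp_cl_io_rb_recycle(rb);
	}
}
#endif
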
/**
 * recv_ishtp_cl_msg_dma() - Receive client message
 * @dev: ISHTP device instance
 * @msg: message pointer
 * @hbm: hbm buffer
 *
 * Receive and dispatch ISHTP client messages using DMA. This function
 * executes in ISR or work queue context
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, flags);

	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client
		 * msg size), drop the message and return the buffer to the
		 * free pool.
		 * Do we need to disconnect such a client? (We don't send
		 * back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;

		/*
		 * If the current fw doesn't support cache snooping, the
		 * driver has to flush the cache manually.
		 */
		if (dev->ops->dma_no_cache_snooping &&
		    dev->ops->dma_no_cache_snooping(dev))
			clflush_cache_range(msg, hbm->msg_length);

		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * the whole msg arrived, send a new FC, and add a new
		 * rb buffer for the next coming msg
		 */
		spin_lock(&cl->free_list_spinlock);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock(&cl->free_list_spinlock);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				      &dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock(&cl->free_list_spinlock);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}

void *ishtp_get_client_data(struct ishtp_cl *cl)
{
	return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);

void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
	cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);

struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
	return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);

void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);

void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);

void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
	cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);

void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
	cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);