bcm_vk_msg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018-2020 Broadcom.
 */

#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"

/* functions to manipulate the transport id in msg block */
#define BCM_VK_MSG_Q_SHIFT	4
#define BCM_VK_MSG_Q_MASK	0xF
#define BCM_VK_MSG_ID_MASK	0xFFF
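/*
 * trans_id layout (see the helpers below):
 *   bits [3:0]  - queue number within the channel
 *   bits [15:4] - 12-bit message id used to match a response against
 *                 its pending to_v entry
 */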
#define BCM_VK_DMA_DRAIN_MAX_MS	2000

/* this number times q_size is the max number of msgs processed per loop */
#define BCM_VK_MSG_PROC_MAX_LOOP 2

/* module parameter */
static bool hb_mon = true;
module_param(hb_mon, bool, 0444);
MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n");
static int batch_log = 1;
module_param(batch_log, int, 0444);
MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n");

static bool hb_mon_is_on(void)
{
	return hb_mon;
}

static u32 get_q_num(const struct vk_msg_blk *msg)
{
	u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK;

	if (q_num >= VK_MSGQ_PER_CHAN_MAX)
		q_num = VK_MSGQ_NUM_DEFAULT;
	return q_num;
}

static void set_q_num(struct vk_msg_blk *msg, u32 q_num)
{
	u32 trans_q;

	if (q_num >= VK_MSGQ_PER_CHAN_MAX)
		trans_q = VK_MSGQ_NUM_DEFAULT;
	else
		trans_q = q_num;

	msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q;
}

static u32 get_msg_id(const struct vk_msg_blk *msg)
{
	return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK);
}

static void set_msg_id(struct vk_msg_blk *msg, u32 val)
{
	msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
}

static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc)
{
	return ((idx + inc) & qinfo->q_mask);
}

static
struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo,
					 u32 idx)
{
	return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx);
}

static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
			 const struct bcm_vk_sync_qinfo *qinfo)
{
	u32 wr_idx, rd_idx;

	wr_idx = readl_relaxed(&msgq->wr_idx);
	rd_idx = readl_relaxed(&msgq->rd_idx);

	return ((wr_idx - rd_idx) & qinfo->q_mask);
}
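/*
 * One block is always left unused so that a completely full queue
 * (wr_idx one slot behind rd_idx) can be told apart from an empty one
 * (wr_idx == rd_idx); hence the "- 1" below.
 */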
static
u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
		     const struct bcm_vk_sync_qinfo *qinfo)
{
	return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);
}

/* number of retries when enqueue message fails before returning EAGAIN */
#define BCM_VK_H2VK_ENQ_RETRY 10
#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50

bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
{
	return (!!atomic_read(&vk->msgq_inited));
}

void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
{
	struct bcm_vk_alert *alert = &vk->host_alert;
	unsigned long flags;

	/* use the irqsave version as this may be called inside a timer interrupt */
	spin_lock_irqsave(&vk->host_alert_lock, flags);
	alert->notfs |= bit_mask;
	spin_unlock_irqrestore(&vk->host_alert_lock, flags);

	if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
		queue_work(vk->wq_thread, &vk->wq_work);
}

/*
 * Heartbeat related defines
 * The heartbeat from the host is a last resort.  If a stuck condition
 * happens on the card, firmware is supposed to detect it.  Therefore, the
 * heartbeat values used by the driver are more relaxed, and need to be
 * bigger than the watchdog timeout on the card.  The watchdog timeout on
 * the card is 20s, with a jitter of 2s => 22s.  We use a value of 27s here.
 */
#define BCM_VK_HB_TIMER_S 3
#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
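/* i.e. 27 / 3 = 9 polling intervals (~27 s) with no uptime change before failing */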
static void bcm_vk_hb_poll(struct timer_list *t)
{
	u32 uptime_s;
	struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl,
						 timer);
	struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);

	if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
		/* read uptime from register and compare */
		uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);

		if (uptime_s == hb->last_uptime)
			hb->lost_cnt++;
		else /* reset to avoid accumulation */
			hb->lost_cnt = 0;

		dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
			hb->last_uptime, uptime_s, hb->lost_cnt);

		/*
		 * if the interface goes down without any activity, a value
		 * of 0xFFFFFFFF will be continuously read, and the detection
		 * will eventually happen.
		 */
		hb->last_uptime = uptime_s;
	} else {
		/* reset heartbeat lost count */
		hb->lost_cnt = 0;
	}

	/* next, check if heartbeat exceeds limit */
	if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) {
		dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
			BCM_VK_HB_LOST_MAX,
			BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S);

		bcm_vk_blk_drv_access(vk);
		bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
	}
	/* re-arm timer */
	mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
}

void bcm_vk_hb_init(struct bcm_vk *vk)
{
	struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;

	timer_setup(&hb->timer, bcm_vk_hb_poll, 0);
	mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
}

void bcm_vk_hb_deinit(struct bcm_vk *vk)
{
	struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;

	del_timer(&hb->timer);
}

static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
				      unsigned int start,
				      unsigned int nbits)
{
	spin_lock(&vk->msg_id_lock);
	bitmap_clear(vk->bmap, start, nbits);
	spin_unlock(&vk->msg_id_lock);
}

/*
 * allocate a ctx per file struct
 */
static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
{
	u32 i;
	struct bcm_vk_ctx *ctx = NULL;
	u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);

	spin_lock(&vk->ctx_lock);

	/* check if it is in reset, if so, don't allow */
	if (vk->reset_pid) {
		dev_err(&vk->pdev->dev,
			"No context allowed during reset by pid %d\n",
			vk->reset_pid);
		goto in_reset_exit;
	}

	for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
		if (!vk->ctx[i].in_use) {
			vk->ctx[i].in_use = true;
			ctx = &vk->ctx[i];
			break;
		}
	}

	if (!ctx) {
		dev_err(&vk->pdev->dev, "All context in use\n");
		goto all_in_use_exit;
	}

	/* set the pid and insert it to hash table */
	ctx->pid = pid;
	ctx->hash_idx = hash_idx;
	list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);

	/* increase kref */
	kref_get(&vk->kref);

	/* clear counter */
	atomic_set(&ctx->pend_cnt, 0);
	atomic_set(&ctx->dma_cnt, 0);
	init_waitqueue_head(&ctx->rd_wq);

all_in_use_exit:
in_reset_exit:
	spin_unlock(&vk->ctx_lock);

	return ctx;
}

static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
{
	u16 rc = VK_MSG_ID_OVERFLOW;
	u16 test_bit_count = 0;

	spin_lock(&vk->msg_id_lock);
	while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) {
		/*
		 * The first time through this loop msg_id will be 0, so the
		 * first id tested is 1.  VK_SIMPLEX_MSG_ID (0) is skipped, as
		 * it is reserved for one-way host2vk communication.
		 */
		vk->msg_id++;
		if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
			vk->msg_id = 1;

		if (test_bit(vk->msg_id, vk->bmap)) {
			test_bit_count++;
			continue;
		}
		rc = vk->msg_id;
		bitmap_set(vk->bmap, vk->msg_id, 1);
		break;
	}
	spin_unlock(&vk->msg_id_lock);

	return rc;
}

static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
{
	u32 idx;
	u32 hash_idx;
	pid_t pid;
	struct bcm_vk_ctx *entry;
	int count = 0;

	if (!ctx) {
		dev_err(&vk->pdev->dev, "NULL context detected\n");
		return -EINVAL;
	}
	idx = ctx->idx;
	pid = ctx->pid;

	spin_lock(&vk->ctx_lock);

	if (!vk->ctx[idx].in_use) {
		dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
	} else {
		vk->ctx[idx].in_use = false;
		vk->ctx[idx].miscdev = NULL;

		/* Remove it from the hash list and see if it is the last one. */
		list_del(&ctx->node);
		hash_idx = ctx->hash_idx;
		list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
			if (entry->pid == pid)
				count++;
		}
	}

	spin_unlock(&vk->ctx_lock);

	return count;
}

static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry)
{
	int proc_cnt;

	bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt);
	if (proc_cnt)
		atomic_dec(&entry->ctx->dma_cnt);

	kfree(entry->to_h_msg);
	kfree(entry);
}
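/*
 * Drain pending work entries on a channel.  Matching entries (all of them,
 * or only those belonging to a specific context) are first moved to a local
 * list under the pendq lock, then logged (up to batch_log items) and freed
 * outside of the lock.
 */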
static void bcm_vk_drain_all_pend(struct device *dev,
				  struct bcm_vk_msg_chan *chan,
				  struct bcm_vk_ctx *ctx)
{
	u32 num;
	struct bcm_vk_wkent *entry, *tmp;
	struct bcm_vk *vk;
	struct list_head del_q;

	if (ctx)
		vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);

	INIT_LIST_HEAD(&del_q);
	spin_lock(&chan->pendq_lock);
	for (num = 0; num < chan->q_nr; num++) {
		list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
			if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
				list_move_tail(&entry->node, &del_q);
			}
		}
	}
	spin_unlock(&chan->pendq_lock);

	/* batch clean up */
	num = 0;
	list_for_each_entry_safe(entry, tmp, &del_q, node) {
		list_del(&entry->node);
		num++;
		if (ctx) {
			struct vk_msg_blk *msg;
			int bit_set;
			bool responded;
			u32 msg_id;

			/* if it is a specific ctx, log any stuck entry */
			msg = entry->to_v_msg;
			msg_id = get_msg_id(msg);
			bit_set = test_bit(msg_id, vk->bmap);
			responded = entry->to_h_msg ? true : false;
			if (num <= batch_log)
				dev_info(dev,
					 "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n",
					 msg->function_id, msg->size,
					 msg_id, entry->seq_num,
					 msg->context_id, entry->ctx->idx,
					 msg->cmd, msg->arg,
					 responded ? "T" : "F", bit_set);
			if (responded)
				atomic_dec(&ctx->pend_cnt);
			else if (bit_set)
				bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
		}
		bcm_vk_free_wkent(dev, entry);
	}
	if (num && ctx)
		dev_info(dev, "Total drained items %d [fd-%d]\n",
			 num, ctx->idx);
}

void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
{
	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
}

/*
 * Function to sync up the message queue info that is provided by BAR1
 */
int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
{
	struct bcm_vk_msgq __iomem *msgq;
	struct device *dev = &vk->pdev->dev;
	u32 msgq_off;
	u32 num_q;
	struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
					       &vk->to_h_msg_chan};
	struct bcm_vk_msg_chan *chan;
	int i, j;
	int ret = 0;

	/*
	 * If the driver is loaded at startup while the VK OS is not up yet,
	 * the msgq-info may not be available until a later time.  In this
	 * case, we skip and the sync function is supposed to be called again.
	 */
	if (!bcm_vk_msgq_marker_valid(vk)) {
		dev_info(dev, "BAR1 msgq marker not initialized.\n");
		return -EAGAIN;
	}

	msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);

	/* each side is always half the total */
	num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
	if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) {
		dev_err(dev,
			"Advertised msgq %d error - max %d allowed\n",
			num_q, VK_MSGQ_PER_CHAN_MAX);
		return -EINVAL;
	}

	vk->to_v_msg_chan.q_nr = num_q;
	vk->to_h_msg_chan.q_nr = num_q;

	/* first msgq location */
	msgq = vk->bar[BAR_1] + msgq_off;

	/*
	 * if this function is called when it is already inited,
	 * something is wrong
	 */
	if (bcm_vk_drv_access_ok(vk) && !force_sync) {
		dev_err(dev, "Msgq info already in sync\n");
		return -EPERM;
	}

	for (i = 0; i < ARRAY_SIZE(chan_list); i++) {
		chan = chan_list[i];
		memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo));

		for (j = 0; j < num_q; j++) {
			struct bcm_vk_sync_qinfo *qinfo;
			u32 msgq_start;
			u32 msgq_size;
			u32 msgq_nxt;
			u32 msgq_db_offset, q_db_offset;

			chan->msgq[j] = msgq;
			msgq_start = readl_relaxed(&msgq->start);
			msgq_size = readl_relaxed(&msgq->size);
			msgq_nxt = readl_relaxed(&msgq->nxt);
			msgq_db_offset = readl_relaxed(&msgq->db_offset);
			/* low bits hold the offset, upper bits its complement as a check */
			q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1));
			if (q_db_offset == (~msgq_db_offset >> DB_SHIFT))
				msgq_db_offset = q_db_offset;
			else
				/* fall back to default */
				msgq_db_offset = VK_BAR0_Q_DB_BASE(j);

			dev_info(dev,
				 "MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n",
				 j,
				 readw_relaxed(&msgq->type),
				 readw_relaxed(&msgq->num),
				 msgq_start,
				 msgq_db_offset,
				 readl_relaxed(&msgq->rd_idx),
				 readl_relaxed(&msgq->wr_idx),
				 msgq_size,
				 msgq_nxt);

			qinfo = &chan->sync_qinfo[j];
			/* formulate and record static info */
			qinfo->q_start = vk->bar[BAR_1] + msgq_start;
			qinfo->q_size = msgq_size;
			/* set low threshold to half of the queue size */
			qinfo->q_low = qinfo->q_size >> 1;
			qinfo->q_mask = qinfo->q_size - 1;
			qinfo->q_db_offset = msgq_db_offset;

			msgq++;
		}
	}
	atomic_set(&vk->msgq_inited, 1);

	return ret;
}

static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan)
{
	u32 i;

	mutex_init(&chan->msgq_mutex);
	spin_lock_init(&chan->pendq_lock);
	for (i = 0; i < VK_MSGQ_MAX_NR; i++)
		INIT_LIST_HEAD(&chan->pendq[i]);

	return 0;
}

static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num,
				struct bcm_vk_wkent *entry)
{
	struct bcm_vk_ctx *ctx;

	spin_lock(&chan->pendq_lock);
	list_add_tail(&entry->node, &chan->pendq[q_num]);
	if (entry->to_h_msg) {
		ctx = entry->ctx;
		atomic_inc(&ctx->pend_cnt);
		wake_up_interruptible(&ctx->rd_wq);
	}
	spin_unlock(&chan->pendq_lock);
}

static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
				struct bcm_vk_wkent *entry,
				struct _vk_data *data,
				unsigned int num_planes)
{
	unsigned int i;
	unsigned int item_cnt = 0;
	struct device *dev = &vk->pdev->dev;
	struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
	struct vk_msg_blk *msg = &entry->to_v_msg[0];
	struct bcm_vk_msgq __iomem *msgq;
	struct bcm_vk_sync_qinfo *qinfo;
	u32 ib_sgl_size = 0;
	u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks];
	u32 avail;
	u32 q_num;

	/* check if the high watermark is hit, and if so, skip */
	q_num = get_q_num(msg);
	msgq = chan->msgq[q_num];
	qinfo = &chan->sync_qinfo[q_num];
	avail = msgq_avail_space(msgq, qinfo);
	if (avail < qinfo->q_low) {
		dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n",
			avail, qinfo->q_size);
		return 0;
	}

	for (i = 0; i < num_planes; i++) {
		if (data[i].address &&
		    (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
			item_cnt++;
			memcpy(buf, entry->dma[i].sglist, data[i].size);
			ib_sgl_size += data[i].size;
			buf += data[i].size;
		}
	}

	dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n",
		item_cnt, ib_sgl_size, vk->ib_sgl_size);

	/* round up the size to the number of blocks */
	ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1)
		       >> VK_MSGQ_BLK_SZ_SHIFT;

	return ib_sgl_size;
}

void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
{
	struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
	struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];

	vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
}
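/*
 * Enqueue a work entry onto a to_v message queue: wait (with bounded
 * retries) for enough free blocks, copy the message blocks into the queue
 * memory in BAR space, publish the new wr_idx and then ring the doorbell
 * for that queue.
 */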
static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
{
	static u32 seq_num;
	struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
	struct device *dev = &vk->pdev->dev;
	struct vk_msg_blk *src = &entry->to_v_msg[0];
	struct vk_msg_blk __iomem *dst;
	struct bcm_vk_msgq __iomem *msgq;
	struct bcm_vk_sync_qinfo *qinfo;
	u32 q_num = get_q_num(src);
	u32 wr_idx; /* local copy */
	u32 i;
	u32 avail;
	u32 retry;

	if (entry->to_v_blks != src->size + 1) {
		dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n",
			entry->to_v_blks,
			src->size + 1,
			get_msg_id(src),
			src->function_id,
			src->context_id);
		return -EMSGSIZE;
	}

	msgq = chan->msgq[q_num];
	qinfo = &chan->sync_qinfo[q_num];

	mutex_lock(&chan->msgq_mutex);

	avail = msgq_avail_space(msgq, qinfo);

	/* if not enough space, return EAGAIN and let the app handle it */
	retry = 0;
	while ((avail < entry->to_v_blks) &&
	       (retry++ < BCM_VK_H2VK_ENQ_RETRY)) {
		mutex_unlock(&chan->msgq_mutex);

		msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS);
		mutex_lock(&chan->msgq_mutex);
		avail = msgq_avail_space(msgq, qinfo);
	}
	if (retry > BCM_VK_H2VK_ENQ_RETRY) {
		mutex_unlock(&chan->msgq_mutex);
		return -EAGAIN;
	}

	/* at this point, mutex is taken and there is enough space */
	entry->seq_num = seq_num++; /* update debug seq number */
	wr_idx = readl_relaxed(&msgq->wr_idx);

	if (wr_idx >= qinfo->q_size) {
		dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!",
			 wr_idx, qinfo->q_size);
		bcm_vk_blk_drv_access(vk);
		bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
		goto idx_err;
	}

	dst = msgq_blk_addr(qinfo, wr_idx);
	for (i = 0; i < entry->to_v_blks; i++) {
		memcpy_toio(dst, src, sizeof(*dst));

		src++;
		wr_idx = msgq_inc(qinfo, wr_idx, 1);
		dst = msgq_blk_addr(qinfo, wr_idx);
	}

	/* flush the write pointer */
	writel(wr_idx, &msgq->wr_idx);

	/* log new info for debugging */
	dev_dbg(dev,
		"MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n",
		readl_relaxed(&msgq->num),
		readl_relaxed(&msgq->rd_idx),
		wr_idx,
		entry->to_v_blks,
		msgq_occupied(msgq, qinfo),
		msgq_avail_space(msgq, qinfo),
		readl_relaxed(&msgq->size));

	/*
	 * Ring the doorbell for this queue.  1 is added to wr_idx so that a
	 * value of 0 never reaches the VK side, where it could not be
	 * distinguished from the initial value.
	 */
	bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
idx_err:
	mutex_unlock(&chan->msgq_mutex);
	return 0;
}

int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
			     const pid_t pid, const u32 q_num)
{
	int rc = 0;
	struct bcm_vk_wkent *entry;
	struct device *dev = &vk->pdev->dev;

	/*
	 * Check if the marker is still good.  Sometimes, the PCIe interface
	 * may have gone down, and if we ship things down based on broken
	 * values, the kernel may panic.
	 */
	if (!bcm_vk_msgq_marker_valid(vk)) {
		dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n",
			 vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
		return -EINVAL;
	}

	entry = kzalloc(struct_size(entry, to_v_msg, 1), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* fill up necessary data */
	entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
	set_q_num(&entry->to_v_msg[0], q_num);
	set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
	entry->to_v_blks = 1; /* always 1 block */

	entry->to_v_msg[0].cmd = shut_type;
	entry->to_v_msg[0].arg = pid;

	rc = bcm_to_v_msg_enqueue(vk, entry);
	if (rc)
		dev_err(dev,
			"Sending shutdown message to q %d for pid %d fails.\n",
			get_q_num(&entry->to_v_msg[0]), pid);

	kfree(entry);

	return rc;
}

static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
				   const u32 q_num)
{
	int rc = 0;
	struct device *dev = &vk->pdev->dev;

	/*
	 * Don't send anything down if the message queue is not initialized;
	 * if this is the reset session, clear it.
	 */
	if (!bcm_vk_drv_access_ok(vk)) {
		if (vk->reset_pid == pid)
			vk->reset_pid = 0;
		return -EPERM;
	}

	dev_dbg(dev, "No more sessions, shut down pid %d\n", pid);

	/* only need to do it if it is not the reset process */
	if (vk->reset_pid != pid)
		rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
	else
		/* put reset_pid to 0 if it is exiting last session */
		vk->reset_pid = 0;

	return rc;
}

static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
						   struct bcm_vk_msg_chan *chan,
						   u16 q_num,
						   u16 msg_id)
{
	struct bcm_vk_wkent *entry = NULL, *iter;

	spin_lock(&chan->pendq_lock);
	list_for_each_entry(iter, &chan->pendq[q_num], node) {
		if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
			list_del(&iter->node);
			entry = iter;
			bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
			break;
		}
	}
	spin_unlock(&chan->pendq_lock);
	return entry;
}

s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
	struct vk_msg_blk *data;
	struct vk_msg_blk __iomem *src;
	struct vk_msg_blk *dst;
	struct bcm_vk_msgq __iomem *msgq;
	struct bcm_vk_sync_qinfo *qinfo;
	struct bcm_vk_wkent *entry;
	u32 rd_idx, wr_idx;
	u32 q_num, msg_id, j;
	u32 num_blks;
	s32 total = 0;
	int cnt = 0;
	int msg_processed = 0;
	int max_msg_to_process;
	bool exit_loop;

	/*
	 * Drain all messages from the queues.  For each message, find the
	 * matching pending entry on the to_v queue based on msg_id & q_num,
	 * and move that entry to the to_h pending queue, waiting for the
	 * user space program to extract it.
	 */
	mutex_lock(&chan->msgq_mutex);

	for (q_num = 0; q_num < chan->q_nr; q_num++) {
		msgq = chan->msgq[q_num];
		qinfo = &chan->sync_qinfo[q_num];
		max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;

		rd_idx = readl_relaxed(&msgq->rd_idx);
		wr_idx = readl_relaxed(&msgq->wr_idx);
		msg_processed = 0;
		exit_loop = false;
		while ((rd_idx != wr_idx) && !exit_loop) {
			u8 src_size;

			/*
			 * Make a local copy and get a pointer to the src blk.
			 * The rd_idx is masked before getting the pointer to
			 * avoid out of bound access in case the interface
			 * goes down.  It will end up pointing to the last
			 * block in the buffer, but the subsequent src->size
			 * check would be able to catch this.
			 */
			src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask);
			src_size = readb(&src->size);

			if ((rd_idx >= qinfo->q_size) ||
			    (src_size > (qinfo->q_size - 1))) {
				dev_crit(dev,
					 "Invalid rd_idx 0x%x or size 0x%x => max 0x%x!",
					 rd_idx, src_size, qinfo->q_size);
				bcm_vk_blk_drv_access(vk);
				bcm_vk_set_host_alert(vk,
						      ERR_LOG_HOST_PCIE_DWN);
				goto idx_err;
			}

			num_blks = src_size + 1;
			data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL);
			if (data) {
				/* copy messages and linearize them */
				dst = data;
				for (j = 0; j < num_blks; j++) {
					memcpy_fromio(dst, src, sizeof(*dst));

					dst++;
					rd_idx = msgq_inc(qinfo, rd_idx, 1);
					src = msgq_blk_addr(qinfo, rd_idx);
				}
				total++;
			} else {
				/*
				 * if we could not allocate memory in kernel,
				 * that is fatal.
				 */
				dev_crit(dev, "Kernel mem allocation failure.\n");
				total = -ENOMEM;
				goto idx_err;
			}

			/* flush rd pointer after a message is dequeued */
			writel(rd_idx, &msgq->rd_idx);

			/* log new info for debugging */
			dev_dbg(dev,
				"MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n",
				readl_relaxed(&msgq->num),
				rd_idx,
				wr_idx,
				num_blks,
				msgq_occupied(msgq, qinfo),
				msgq_avail_space(msgq, qinfo),
				readl_relaxed(&msgq->size));

			/*
			 * No need to search if it is an autonomous one-way
			 * message from the driver, as these messages do not
			 * bear a to_v pending item.  Currently, only the
			 * shutdown message falls into this category.
			 */
			if (data->function_id == VK_FID_SHUTDOWN) {
				kfree(data);
				continue;
			}

			msg_id = get_msg_id(data);
			/* lookup the original message in the to_v direction */
			entry = bcm_vk_dequeue_pending(vk,
						       &vk->to_v_msg_chan,
						       q_num,
						       msg_id);

			/*
			 * If a matching prior send is found, attach the
			 * response to it; otherwise the response is an
			 * orphan, which is logged and dropped below.
			 */
			if (entry) {
				entry->to_h_blks = num_blks;
				entry->to_h_msg = data;
				bcm_vk_append_pendq(&vk->to_h_msg_chan,
						    q_num, entry);
			} else {
				if (cnt++ < batch_log)
					dev_info(dev,
						 "Could not find MsgId[0x%x] for resp func %d bmap %d\n",
						 msg_id, data->function_id,
						 test_bit(msg_id, vk->bmap));
				kfree(data);
			}

			/* Fetch wr_idx to handle more back-to-back events */
			wr_idx = readl(&msgq->wr_idx);

			/*
			 * Cap the number of messages processed per loop so
			 * that handling back-to-back events does not hold the
			 * CPU for too long, and so that corrupted rd/wr
			 * indexes cannot trigger infinite looping.
			 */
			if (++msg_processed >= max_msg_to_process) {
				dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
					 q_num, max_msg_to_process);
				exit_loop = true;
			}
		}
	}
idx_err:
	mutex_unlock(&chan->msgq_mutex);
	dev_dbg(dev, "total %d drained from queues\n", total);

	return total;
}

/*
 * init routine for all required data structures
 */
static int bcm_vk_data_init(struct bcm_vk *vk)
{
	int i;

	spin_lock_init(&vk->ctx_lock);
	for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
		vk->ctx[i].in_use = false;
		vk->ctx[i].idx = i;	/* self identity */
		vk->ctx[i].miscdev = NULL;
	}
	spin_lock_init(&vk->msg_id_lock);
	spin_lock_init(&vk->host_alert_lock);
	vk->msg_id = 0;

	/* initialize hash table */
	for (i = 0; i < VK_PID_HT_SZ; i++)
		INIT_LIST_HEAD(&vk->pid_ht[i].head);

	return 0;
}

irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
{
	struct bcm_vk *vk = dev_id;

	if (!bcm_vk_drv_access_ok(vk)) {
		dev_err(&vk->pdev->dev,
			"Interrupt %d received when msgq not inited\n", irq);
		goto skip_schedule_work;
	}

	queue_work(vk->wq_thread, &vk->wq_work);

skip_schedule_work:
	return IRQ_HANDLED;
}

int bcm_vk_open(struct inode *inode, struct file *p_file)
{
	struct bcm_vk_ctx *ctx;
	struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data;
	struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
	struct device *dev = &vk->pdev->dev;
	int rc = 0;

	/* get a context and set it up for the file */
	ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
	if (!ctx) {
		dev_err(dev, "Error allocating context\n");
		rc = -ENOMEM;
	} else {
		/*
		 * Set up the context and replace the private data with the
		 * context for other methods to use.  The reason for the
		 * context is that multiple sessions are allowed to open the
		 * device, and when the upper layer queries for a response,
		 * only those tied to a specific open should be returned.
		 * context->idx is used for that binding.
		 */
		ctx->miscdev = miscdev;
		p_file->private_data = ctx;
		dev_dbg(dev, "ctx_returned with idx %d, pid %d\n",
			ctx->idx, ctx->pid);
	}
	return rc;
}
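/*
 * Read returns one pending response that belongs to the calling context,
 * scanning the to_h pending queues from the highest to the lowest priority.
 * If the user buffer is too small, only the first block is copied back so
 * that the application can learn the required size and retry.
 */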
ssize_t bcm_vk_read(struct file *p_file,
		    char __user *buf,
		    size_t count,
		    loff_t *f_pos)
{
	ssize_t rc = -ENOMSG;
	struct bcm_vk_ctx *ctx = p_file->private_data;
	struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
					 miscdev);
	struct device *dev = &vk->pdev->dev;
	struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
	struct bcm_vk_wkent *entry = NULL, *iter;
	u32 q_num;
	u32 rsp_length;

	if (!bcm_vk_drv_access_ok(vk))
		return -EPERM;

	dev_dbg(dev, "Buf count %zu\n", count);

	/*
	 * search through the pendq on the to_h chan, and return only those
	 * that belong to the same context.  Search is always from the high
	 * to the low priority queues.
	 */
	spin_lock(&chan->pendq_lock);
	for (q_num = 0; q_num < chan->q_nr; q_num++) {
		list_for_each_entry(iter, &chan->pendq[q_num], node) {
			if (iter->ctx->idx == ctx->idx) {
				if (count >=
				    (iter->to_h_blks * VK_MSGQ_BLK_SIZE)) {
					list_del(&iter->node);
					atomic_dec(&ctx->pend_cnt);
				} else {
					/* buffer not big enough */
					rc = -EMSGSIZE;
				}
				entry = iter;
				goto read_loop_exit;
			}
		}
	}
read_loop_exit:
	spin_unlock(&chan->pendq_lock);

	if (entry && rc != -EMSGSIZE) {
		/* retrieve the passed down msg_id */
		set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
		rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
		if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0)
			rc = rsp_length;

		bcm_vk_free_wkent(dev, entry);
	} else if (entry) {
		/*
		 * -EMSGSIZE: the entry stays on the pendq; return just the
		 * first block, so that the app knows what size it is
		 * looking for.
		 */
		struct vk_msg_blk tmp_msg = entry->to_h_msg[0];

		set_msg_id(&tmp_msg, entry->usr_msg_id);
		tmp_msg.size = entry->to_h_blks - 1;
		if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) {
			dev_err(dev, "Error return 1st block in -EMSGSIZE\n");
			rc = -EFAULT;
		}
	}
	return rc;
}
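/*
 * Write path: validate that count is a whole number of message blocks,
 * allocate a work entry (with room for an inband SGL), copy the message
 * from user space, allocate an internal msg_id, optionally convert user
 * buffers to DMA SG lists for VK_FID_TRANS_BUF, queue the entry on the
 * to_v pendq and finally enqueue it to the card.
 */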
ssize_t bcm_vk_write(struct file *p_file,
		     const char __user *buf,
		     size_t count,
		     loff_t *f_pos)
{
	ssize_t rc;
	struct bcm_vk_ctx *ctx = p_file->private_data;
	struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
					 miscdev);
	struct bcm_vk_msgq __iomem *msgq;
	struct device *dev = &vk->pdev->dev;
	struct bcm_vk_wkent *entry;
	u32 sgl_extra_blks;
	u32 q_num;
	u32 msg_size;
	u32 msgq_size;

	if (!bcm_vk_drv_access_ok(vk))
		return -EPERM;

	dev_dbg(dev, "Msg count %zu\n", count);

	/* first, a sanity check: count must be a multiple of the basic blk */
	if (count & (VK_MSGQ_BLK_SIZE - 1)) {
		dev_err(dev, "Failure with size %zu not multiple of %zu\n",
			count, VK_MSGQ_BLK_SIZE);
		rc = -EINVAL;
		goto write_err;
	}

	/* allocate the work entry + buffer for size count and inband sgl */
	entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
			GFP_KERNEL);
	if (!entry) {
		rc = -ENOMEM;
		goto write_err;
	}

	/* now copy msg from user space, and then formulate the work entry */
	if (copy_from_user(&entry->to_v_msg[0], buf, count)) {
		rc = -EFAULT;
		goto write_free_ent;
	}

	entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT;
	entry->ctx = ctx;

	/* do a check on the blk size, which must not exceed the queue space */
	q_num = get_q_num(&entry->to_v_msg[0]);
	msgq = vk->to_v_msg_chan.msgq[q_num];
	msgq_size = readl_relaxed(&msgq->size);
	if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
	    > (msgq_size - 1)) {
		dev_err(dev, "Blk size %d exceed max queue size allowed %d\n",
			entry->to_v_blks, msgq_size - 1);
		rc = -EINVAL;
		goto write_free_ent;
	}

	/* Use internal message id */
	entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]);
	rc = bcm_vk_get_msg_id(vk);
	if (rc == VK_MSG_ID_OVERFLOW) {
		dev_err(dev, "msg_id overflow\n");
		rc = -EOVERFLOW;
		goto write_free_ent;
	}
	set_msg_id(&entry->to_v_msg[0], rc);
	ctx->q_num = q_num;

	dev_dbg(dev,
		"[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n",
		ctx->q_num, ctx->idx, entry->usr_msg_id,
		get_msg_id(&entry->to_v_msg[0]));

	if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) {
		/* Convert any pointers to sg list */
		unsigned int num_planes;
		int dir;
		struct _vk_data *data;

		/*
		 * check if we are in reset, if so, no buffer transfer is
		 * allowed and return error.
		 */
		if (vk->reset_pid) {
			dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n",
				ctx->pid);
			rc = -EACCES;
			goto write_free_msgid;
		}

		num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK;
		if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD)
			dir = DMA_FROM_DEVICE;
		else
			dir = DMA_TO_DEVICE;

		/* Calculate vk_data location */
		/* Go to end of the message */
		msg_size = entry->to_v_msg[0].size;
		if (msg_size > entry->to_v_blks) {
			rc = -EMSGSIZE;
			goto write_free_msgid;
		}

		data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1];

		/* Now back up to the start of the pointers */
		data -= num_planes;

		/* Convert user addresses to DMA SG List */
		rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes);
		if (rc)
			goto write_free_msgid;

		atomic_inc(&ctx->dma_cnt);
		/* try to embed inband sgl */
		sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
						      num_planes);
		entry->to_v_blks += sgl_extra_blks;
		entry->to_v_msg[0].size += sgl_extra_blks;
	} else if (entry->to_v_msg[0].function_id == VK_FID_INIT &&
		   entry->to_v_msg[0].context_id == VK_NEW_CTX) {
		/*
		 * Init happens in 2 stages, only the first stage contains the
		 * pid that needs translating.
		 */
		pid_t org_pid, pid;

		/*
		 * translate the pid into the unique host space as the user
		 * may run sessions inside containers or process namespaces.
		 */
#define VK_MSG_PID_MASK 0xffffff00
#define VK_MSG_PID_SH   8
		org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK)
			   >> VK_MSG_PID_SH;

		pid = task_tgid_nr(current);
		entry->to_v_msg[0].arg =
			(entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) |
			(pid << VK_MSG_PID_SH);
		if (org_pid != pid)
			dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n",
				org_pid, org_pid, pid, pid);
	}

	/*
	 * Store the work entry on the pending queue until a response is
	 * received.  This needs to be done before enqueuing the message.
	 */
	bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);

	rc = bcm_to_v_msg_enqueue(vk, entry);
	if (rc) {
		dev_err(dev, "Fail to enqueue msg to to_v queue\n");

		/* remove message from pending list */
		entry = bcm_vk_dequeue_pending
			       (vk,
				&vk->to_v_msg_chan,
				q_num,
				get_msg_id(&entry->to_v_msg[0]));
		goto write_free_ent;
	}

	return count;

write_free_msgid:
	bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
write_free_ent:
	kfree(entry);
write_err:
	return rc;
}

__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait)
{
	__poll_t ret = 0;
	int cnt;
	struct bcm_vk_ctx *ctx = p_file->private_data;
	struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
	struct device *dev = &vk->pdev->dev;

	poll_wait(p_file, &ctx->rd_wq, wait);

	cnt = atomic_read(&ctx->pend_cnt);
	if (cnt) {
		ret = (__force __poll_t)(POLLIN | POLLRDNORM);
		if (cnt < 0) {
			dev_err(dev, "Error cnt %d, setting back to 0", cnt);
			atomic_set(&ctx->pend_cnt, 0);
		}
	}

	return ret;
}

int bcm_vk_release(struct inode *inode, struct file *p_file)
{
	int ret;
	struct bcm_vk_ctx *ctx = p_file->private_data;
	struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
	struct device *dev = &vk->pdev->dev;
	pid_t pid = ctx->pid;
	int dma_cnt;
	unsigned long timeout, start_time;

	/*
	 * If there are outstanding DMA transactions, we need to delay long
	 * enough to ensure that the card side has stopped touching the host
	 * buffer and its SGL list.  A race condition could happen if the host
	 * app is killed abruptly, e.g. kill -9, while some DMA transfer orders
	 * are still inflight.  Nothing can be done except for a delay, as the
	 * host side is running in a completely async fashion.
	 */
	start_time = jiffies;
	timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
	do {
		if (time_after(jiffies, timeout)) {
			dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
				 dma_cnt, ctx->idx, pid);
			break;
		}
		dma_cnt = atomic_read(&ctx->dma_cnt);
		cpu_relax();
		cond_resched();
	} while (dma_cnt);
	dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
		ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));

	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);

	ret = bcm_vk_free_ctx(vk, ctx);
	if (ret == 0)
		ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
	else
		ret = 0;

	kref_put(&vk->kref, bcm_vk_release_data);

	return ret;
}

int bcm_vk_msg_init(struct bcm_vk *vk)
{
	struct device *dev = &vk->pdev->dev;
	int ret;

	if (bcm_vk_data_init(vk)) {
		dev_err(dev, "Error initializing internal data structures\n");
		return -EINVAL;
	}

	if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
	    bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
		dev_err(dev, "Error initializing communication channel\n");
		return -EIO;
	}

	/* read msgq info if ready */
	ret = bcm_vk_sync_msgq(vk, false);
	if (ret && (ret != -EAGAIN)) {
		dev_err(dev, "Error reading comm msg Q info\n");
		return -EIO;
	}

	return 0;
}

void bcm_vk_msg_remove(struct bcm_vk *vk)
{
	bcm_vk_blk_drv_access(vk);

	/* drain all pending items */
	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
	bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
}