// SPDX-License-Identifier: GPL-2.0-only
/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Kai Vehmanen <[email protected]>
 * Original author: Peter Ujfalusi <[email protected]>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/pm_qos.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
#include <linux/hsi/cs-protocol.h>

#define CS_MMAP_SIZE	PAGE_SIZE

struct char_queue {
	struct list_head	list;
	u32			msg;
};

struct cs_char {
	unsigned int		opened;
	struct hsi_client	*cl;
	struct cs_hsi_iface	*hi;
	struct list_head	chardev_queue;
	struct list_head	dataind_queue;
	int			dataind_pending;
	/* mmap things */
	unsigned long		mmap_base;
	unsigned long		mmap_size;
	spinlock_t		lock;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	/* hsi channel ids */
	int			channel_id_cmd;
	int			channel_id_data;
};

#define SSI_CHANNEL_STATE_READING	1
#define SSI_CHANNEL_STATE_WRITING	(1 << 1)
#define SSI_CHANNEL_STATE_POLL		(1 << 2)
#define SSI_CHANNEL_STATE_ERROR		(1 << 3)

#define TARGET_MASK		0xf000000
#define TARGET_REMOTE		(1 << CS_DOMAIN_SHIFT)
#define TARGET_LOCAL		0

/* Number of pre-allocated commands buffers */
#define CS_MAX_CMDS		4

/*
 * During data transfers, transactions must be handled
 * within 20ms (fixed value in cmtspeech HSI protocol)
 */
#define CS_QOS_LATENCY_FOR_DATA_USEC	20000

/* Timeout to wait for pending HSI transfers to complete */
#define CS_HSI_TRANSFER_TIMEOUT_MS	500

#define RX_PTR_BOUNDARY_SHIFT		8
#define RX_PTR_MAX_SHIFT		(RX_PTR_BOUNDARY_SHIFT + \
						CS_MAX_BUFFERS_SHIFT)

struct cs_hsi_iface {
	struct hsi_client		*cl;
	struct hsi_client		*master;

	unsigned int			iface_state;
	unsigned int			wakeline_state;
	unsigned int			control_state;
	unsigned int			data_state;

	/* state exposed to application */
	struct cs_mmap_config_block	*mmap_cfg;

	unsigned long			mmap_base;
	unsigned long			mmap_size;

	unsigned int			rx_slot;
	unsigned int			tx_slot;

	/* note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here */
	unsigned int			buf_size;
	unsigned int			rx_bufs;
	unsigned int			tx_bufs;
	unsigned int			rx_ptr_boundary;
	unsigned int			rx_offsets[CS_MAX_BUFFERS];
	unsigned int			tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int			slot_size;
	unsigned int			flags;

	struct list_head		cmdqueue;

	struct hsi_msg			*data_rx_msg;
	struct hsi_msg			*data_tx_msg;
	wait_queue_head_t		datawait;

	struct pm_qos_request		pm_qos_req;

	spinlock_t			lock;
};

static struct cs_char cs_char_data;

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);

static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}
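
/*
 * Queue a 32-bit event word for the character device and wake up any
 * readers: cs_notify() appends the message to the given queue (control
 * events or data indications), wakes tasks blocked in read()/poll() and
 * sends SIGIO to fasync subscribers. Events are dropped if the device is
 * not open or the queue entry cannot be allocated.
 */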
static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}

static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}

static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
			!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device, "data notification "
			"queue overrun (%u entries)\n",
			cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}

static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}

static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}
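
/*
 * Control-channel commands travel in a small pool of pre-allocated
 * single-word HSI messages (CS_MAX_CMDS entries). Messages are taken from
 * hi->cmdqueue with cs_claim_cmd() and returned with cs_release_cmd(), so
 * no allocations are needed on the control hot path.
 */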
static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}

static void cs_hsi_data_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
			"Data %s flush while device active\n", dir);
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);
}

static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}

static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
					msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}

static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}

static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}
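
/*
 * Control RX path: cs_hsi_read_on_control() claims a command buffer and
 * issues a zero-length "peek" read; once data is available, the peek
 * completion re-submits the message as a one-word read, and the read
 * completion timestamps the event (if CS_FEAT_TSTAMP_RX_CTRL is set),
 * forwards the command to userspace and re-arms the next control read.
 */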
static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		struct timespec64 tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts64(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}

static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}

static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}
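
/*
 * Data RX path: mirrors the control path with a zero-length peek followed
 * by a read of buf_size bytes directly into the next RX slot of the shared
 * mmap area. On completion the rolling rx_ptr is advanced and exposed via
 * mmap_cfg, and userspace is notified with CS_RX_DATA_RECEIVED plus the
 * slot number that was just filled.
 */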
static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}

static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}

/*
 * Read/write transaction is ongoing. Returns false if in
 * SSI_CHANNEL_STATE_POLL state.
 */
static inline int cs_state_xfer_active(unsigned int state)
{
	return (state & SSI_CHANNEL_STATE_WRITING) ||
		(state & SSI_CHANNEL_STATE_READING);
}

/*
 * No pending read/writes
 */
static inline int cs_state_idle(unsigned int state)
{
	return !(state & ~SSI_CHANNEL_STATE_ERROR);
}

static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}

static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}

static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}

static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}
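
/*
 * Dispatch a command written by userspace: commands addressed to
 * TARGET_REMOTE are sent to the modem on the control channel, while a
 * local CS_TX_DATA_READY command starts a data-channel write from the
 * TX slot given in the parameter bits.
 */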
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}

static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}

static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}

static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
					buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device, "No space for the requested buffer "
			"configuration\n");
		r = -ENOBUFS;
	}

	return r;
}

/*
 * Block until pending data transfers have completed.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);

		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/*
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}
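
/*
 * Lay out the shared mmap page: the config block sits at offset zero,
 * followed by rx_bufs receive slots and then tx_bufs transmit slots, each
 * rounded up to an L1 cache line. The resulting offsets are mirrored into
 * mmap_cfg so userspace can locate the buffers.
 */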
static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	unsigned int data_start, i;

	BUG_ON(hi->buf_size == 0);

	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);

	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
	dev_dbg(&hi->cl->device,
			"setting slot size to %u, buf size %u, align %u\n",
			hi->slot_size, hi->buf_size, L1_CACHE_BYTES);

	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
			"setting data start at %u, cfg block %u, align %u\n",
			data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);

	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
		hi->rx_offsets[i] = data_start + i * hi->slot_size;
		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}
	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
		hi->tx_offsets[i] = data_start +
			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}

	hi->iface_state = CS_STATE_CONFIGURED;
}

static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}
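
/*
 * Reconfigure the data buffers: drop back to CS_STATE_OPENED so no new
 * transfers start, wait for pending ones with cs_hsi_data_sync(), then
 * validate and apply the new layout. Entering CS_STATE_CONFIGURED adds a
 * CPU latency QoS request and kicks off the first data read; leaving the
 * state removes the request.
 */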
static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Prevent new transactions during buffer reconfig */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * proceeding to change the buffer layout
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;
	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			cpu_latency_qos_add_request(&hi->pm_qos_req,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			cpu_latency_qos_remove_request(&hi->pm_qos_req);
		}
	}
	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}
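
/*
 * Bring up the HSI interface for one open() of the character device:
 * allocate command and data messages, claim the HSI port, look up the
 * SSI master client and, once the link is running, start listening on
 * the control channel.
 */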
static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
			unsigned long mmap_base, unsigned long mmap_size)
{
	int err = 0;
	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);

	dev_dbg(&cl->device, "cs_hsi_start\n");

	if (!hsi_if) {
		err = -ENOMEM;
		goto leave0;
	}
	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);
	err = cs_alloc_cmds(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages\n");
		goto leave1;
	}
	err = cs_hsi_alloc_data(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
		goto leave2;
	}
	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device,
				"Could not open, HSI port already claimed\n");
		goto leave3;
	}
	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
		goto leave4;
	}
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,
				"HSI port not initialized\n");
		goto leave4;
	}

	hsi_if->iface_state = CS_STATE_OPENED;
	local_bh_disable();
	cs_hsi_read_on_control(hsi_if);
	local_bh_enable();

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	BUG_ON(!hi);
	*hi = hsi_if;

	return 0;

leave4:
	hsi_release_port(cl);
leave3:
	cs_hsi_free_data(hsi_if);
leave2:
	cs_free_cmds(hsi_if);
leave1:
	kfree(hsi_if);
leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");

	return err;
}

static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so cs_state_idle() should be true for both
	 * control and data channels.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (cpu_latency_qos_request_active(&hi->pm_qos_req))
		cpu_latency_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}
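
/*
 * The single page allocated at open() backs both the config block and the
 * data buffers; the fault handler simply maps that page into the caller's
 * VMA, so userspace and the driver share it directly.
 */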
static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
{
	struct cs_char *csdata = vmf->vma->vm_private_data;
	struct page *page;

	page = virt_to_page((void *)csdata->mmap_base);
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct cs_char_vm_ops = {
	.fault	= cs_char_vma_fault,
};

static int cs_char_fasync(int fd, struct file *file, int on)
{
	struct cs_char *csdata = file->private_data;

	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
		return -EIO;

	return 0;
}

static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
	struct cs_char *csdata = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &cs_char_data.wait, wait);
	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	else if (!list_empty(&csdata->dataind_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock_bh(&csdata->lock);

	return ret;
}
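
/*
 * read() returns one 32-bit event word per call, preferring control
 * events over data indications, and blocks (unless O_NONBLOCK is set)
 * until an event arrives or a signal is pending.
 */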
static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
								loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	for (;;) {
		DEFINE_WAIT(wait);

		spin_lock_bh(&csdata->lock);
		if (!list_empty(&csdata->chardev_queue)) {
			data = cs_pop_entry(&csdata->chardev_queue);
		} else if (!list_empty(&csdata->dataind_queue)) {
			data = cs_pop_entry(&csdata->dataind_queue);
			csdata->dataind_pending--;
		} else {
			data = 0;
		}
		spin_unlock_bh(&csdata->lock);

		if (data)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		prepare_to_wait_exclusive(&csdata->wait, &wait,
						TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&csdata->wait, &wait);
	}

	retval = put_user(data, (u32 __user *)buf);
	if (!retval)
		retval = sizeof(data);

out:
	return retval;
}

static ssize_t cs_char_write(struct file *file, const char __user *buf,
						size_t count, loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	int err;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	if (get_user(data, (u32 __user *)buf))
		retval = -EFAULT;
	else
		retval = count;

	err = cs_hsi_command(csdata->hi, data);
	if (err < 0)
		retval = err;

	return retval;
}

static long cs_char_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);
		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg, sizeof(state))) {
			r = -EFAULT;
			break;
		}

		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
							sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}

static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	if (vma_pages(vma) != 1)
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
	vma->vm_ops = &cs_char_vm_ops;
	vma->vm_private_data = file->private_data;

	return 0;
}
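
/*
 * open() is exclusive: the first opener allocates a zeroed page for the
 * mmap area and starts the HSI interface; further opens fail with -EBUSY
 * until release().
 */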
static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* these are only used in release so lock not needed */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}

static void cs_free_char_queue(struct list_head *head)
{
	struct char_queue *entry;
	struct list_head *cursor, *next;

	if (!list_empty(head)) {
		list_for_each_safe(cursor, next, head) {
			entry = list_entry(cursor, struct char_queue, list);
			list_del(&entry->list);
			kfree(entry);
		}
	}
}

static int cs_char_release(struct inode *unused, struct file *file)
{
	struct cs_char *csdata = file->private_data;

	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

	return 0;
}

static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};

static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};
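
/*
 * Rough userspace usage, as implied by the fops above (illustrative
 * sketch only, not part of the original driver): open /dev/cmt_speech,
 * mmap() one page to reach the config block and data buffers, issue
 * CS_CONFIG_BUFS and CS_SET_WAKELINE ioctls, then read() 32-bit events
 * and write() 32-bit commands such as CS_TX_DATA_READY.
 */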
static int cs_hsi_client_probe(struct device *dev)
{
	int err = 0;
	struct hsi_client *cl = to_hsi_client(dev);

	dev_dbg(dev, "hsi_client_probe\n");
	init_waitqueue_head(&cs_char_data.wait);
	spin_lock_init(&cs_char_data.lock);
	cs_char_data.opened = 0;
	cs_char_data.cl = cl;
	cs_char_data.hi = NULL;
	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
	INIT_LIST_HEAD(&cs_char_data.dataind_queue);

	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
		"speech-control");
	if (cs_char_data.channel_id_cmd < 0) {
		err = cs_char_data.channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		return err;
	}

	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
		"speech-data");
	if (cs_char_data.channel_id_data < 0) {
		err = cs_char_data.channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		return err;
	}

	err = misc_register(&cs_char_miscdev);
	if (err)
		dev_err(dev, "Failed to register: %d\n", err);

	return err;
}

static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);

	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);

	if (hi)
		cs_hsi_stop(hi);

	return 0;
}

static struct hsi_client_driver cs_hsi_driver = {
	.driver = {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};

static int __init cs_char_init(void)
{
	pr_info("CMT speech driver added\n");
	return hsi_register_client_driver(&cs_hsi_driver);
}
module_init(cs_char_init);

static void __exit cs_char_exit(void)
{
	hsi_unregister_client_driver(&cs_hsi_driver);
	pr_info("CMT speech driver removed\n");
}
module_exit(cs_char_exit);

MODULE_ALIAS("hsi:cmt-speech");
MODULE_AUTHOR("Kai Vehmanen <[email protected]>");
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("CMT speech driver");
MODULE_LICENSE("GPL v2");