fmdrv_common.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * FM Driver for Connectivity chip of Texas Instruments.
  4. *
  5. * This sub-module of FM driver is common for FM RX and TX
  6. * functionality. This module is responsible for:
  7. * 1) Forming group of Channel-8 commands to perform particular
  8. * functionality (eg., frequency set require more than
  9. * one Channel-8 command to be sent to the chip).
  10. * 2) Sending each Channel-8 command to the chip and reading
  11. * response back over Shared Transport.
  12. * 3) Managing TX and RX Queues and Tasklets.
  13. * 4) Handling FM Interrupt packet and taking appropriate action.
  14. * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
  15. * firmware files based on mode selection)
  16. *
  17. * Copyright (C) 2011 Texas Instruments
  18. * Author: Raja Mani <[email protected]>
  19. * Author: Manjunatha Halli <[email protected]>
  20. */
  21. #include <linux/delay.h>
  22. #include <linux/firmware.h>
  23. #include <linux/module.h>
  24. #include <linux/nospec.h>
  25. #include <linux/jiffies.h>
  26. #include "fmdrv.h"
  27. #include "fmdrv_v4l2.h"
  28. #include "fmdrv_common.h"
  29. #include <linux/ti_wilink_st.h>
  30. #include "fmdrv_rx.h"
  31. #include "fmdrv_tx.h"
  32. /* Region info */
/* Region info */
/*
 * Per-region tuning limits. Frequencies are in kHz (87500 == 87.5 MHz),
 * matching the inline comments; chanl_space is pre-scaled by FM_FREQ_MUL.
 * fm_band is the index the RX code uses to pick AF limits (0 = Europe/US,
 * 1 = Japan). Indexed by the "region" byte, see fmc_update_region_info().
 */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};
  49. /* Band selection */
  50. static u8 default_radio_region; /* Europe/US */
  51. module_param(default_radio_region, byte, 0);
  52. MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
  53. /* RDS buffer blocks */
  54. static u32 default_rds_buf = 300;
  55. module_param(default_rds_buf, uint, 0444);
  56. MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
  57. /* Radio Nr */
  58. static u32 radio_nr = -1;
  59. module_param(radio_nr, int, 0444);
  60. MODULE_PARM_DESC(radio_nr, "Radio Nr");
  61. /* FM irq handlers forward declaration */
  62. static void fm_irq_send_flag_getcmd(struct fmdev *);
  63. static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
  64. static void fm_irq_handle_hw_malfunction(struct fmdev *);
  65. static void fm_irq_handle_rds_start(struct fmdev *);
  66. static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
  67. static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
  68. static void fm_irq_handle_rds_finish(struct fmdev *);
  69. static void fm_irq_handle_tune_op_ended(struct fmdev *);
  70. static void fm_irq_handle_power_enb(struct fmdev *);
  71. static void fm_irq_handle_low_rssi_start(struct fmdev *);
  72. static void fm_irq_afjump_set_pi(struct fmdev *);
  73. static void fm_irq_handle_set_pi_resp(struct fmdev *);
  74. static void fm_irq_afjump_set_pimask(struct fmdev *);
  75. static void fm_irq_handle_set_pimask_resp(struct fmdev *);
  76. static void fm_irq_afjump_setfreq(struct fmdev *);
  77. static void fm_irq_handle_setfreq_resp(struct fmdev *);
  78. static void fm_irq_afjump_enableint(struct fmdev *);
  79. static void fm_irq_afjump_enableint_resp(struct fmdev *);
  80. static void fm_irq_start_afjump(struct fmdev *);
  81. static void fm_irq_handle_start_afjump_resp(struct fmdev *);
  82. static void fm_irq_afjump_rd_freq(struct fmdev *);
  83. static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
  84. static void fm_irq_handle_low_rssi_finish(struct fmdev *);
  85. static void fm_irq_send_intmsk_cmd(struct fmdev *);
  86. static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);
  87. /*
  88. * When FM common module receives interrupt packet, following handlers
  89. * will be executed one after another to service the interrupt(s)
  90. */
/*
 * Stage indices for the interrupt state machine; each value selects the
 * matching entry of int_handler_table[] via fmdev->irq_info.stage.
 */
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction irq handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold reached irq handler */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended irq handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* TX power enable irq handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI irq handler */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt process post action */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};
  124. /* FM interrupt handler table */
/*
 * FM interrupt handler table.
 * Entry order MUST stay in lockstep with enum fmc_irq_handler_index:
 * irq_info.stage is set to those enum values and used to index this table.
 */
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start, /* RDS threshold reached irq handler */
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb, /* TX power enable irq handler */
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd, /* Interrupt process post action */
	fm_irq_handle_intmsk_cmd_resp
};
/* Write hook into the ST (shared transport) driver; used by send_tasklet() */
static long (*g_st_write) (struct sk_buff *skb);
/* NOTE(review): presumably signalled when ST registration completes — its
 * waiter is outside this chunk; confirm against fmc_prepare(). */
static struct completion wait_for_fmdrv_reg_comp;
/* Dispatch the handler that the current irq stage index points at */
static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}
  158. /* Continue next function in interrupt handler table */
static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	/* Advance the irq state machine and run the new stage immediately */
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}
static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	/*
	 * Advance the state machine but do NOT run the stage yet: the next
	 * stage runs when the chip's response arrives. The timer provides
	 * recovery (int_timeout_handler) if no response shows up in time.
	 */
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}
  169. #ifdef FM_DUMP_TXRX_PKT
  170. /* To dump outgoing FM Channel-8 packets */
/* To dump outgoing FM Channel-8 packets */
/*
 * Debug-only (compiled under FM_DUMP_TXRX_PKT). Prints the command header
 * and up to the first 14 payload bytes; a leading '*' marks packets queued
 * without a completion waiter.
 */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n   data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);	/* cap the hex dump at 14 bytes */
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}
  192. /* To dump incoming FM Channel-8 packets */
/* To dump incoming FM Channel-8 packets */
/*
 * Debug-only counterpart of dump_tx_skb_data(): prints the event header
 * (including status and flow-control count) plus up to 14 payload bytes.
 */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
	       evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n   data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);	/* cap the hex dump at 14 bytes */
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}
  214. #endif
  215. void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
  216. {
  217. fmdev->rx.region = region_configs[region_to_set];
  218. }
  219. /*
  220. * FM common sub-module will schedule this tasklet whenever it receives
  221. * FM packet from ST driver.
  222. */
  223. static void recv_tasklet(struct tasklet_struct *t)
  224. {
  225. struct fmdev *fmdev;
  226. struct fm_irq *irq_info;
  227. struct fm_event_msg_hdr *evt_hdr;
  228. struct sk_buff *skb;
  229. u8 num_fm_hci_cmds;
  230. unsigned long flags;
  231. fmdev = from_tasklet(fmdev, t, tx_task);
  232. irq_info = &fmdev->irq_info;
  233. /* Process all packets in the RX queue */
  234. while ((skb = skb_dequeue(&fmdev->rx_q))) {
  235. if (skb->len < sizeof(struct fm_event_msg_hdr)) {
  236. fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
  237. skb,
  238. skb->len, sizeof(struct fm_event_msg_hdr));
  239. kfree_skb(skb);
  240. continue;
  241. }
  242. evt_hdr = (void *)skb->data;
  243. num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;
  244. /* FM interrupt packet? */
  245. if (evt_hdr->op == FM_INTERRUPT) {
  246. /* FM interrupt handler started already? */
  247. if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
  248. set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
  249. if (irq_info->stage != 0) {
  250. fmerr("Inval stage resetting to zero\n");
  251. irq_info->stage = 0;
  252. }
  253. /*
  254. * Execute first function in interrupt handler
  255. * table.
  256. */
  257. irq_info->handlers[irq_info->stage](fmdev);
  258. } else {
  259. set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
  260. }
  261. kfree_skb(skb);
  262. }
  263. /* Anyone waiting for this with completion handler? */
  264. else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
  265. spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
  266. fmdev->resp_skb = skb;
  267. spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
  268. complete(fmdev->resp_comp);
  269. fmdev->resp_comp = NULL;
  270. atomic_set(&fmdev->tx_cnt, 1);
  271. }
  272. /* Is this for interrupt handler? */
  273. else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
  274. if (fmdev->resp_skb != NULL)
  275. fmerr("Response SKB ptr not NULL\n");
  276. spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
  277. fmdev->resp_skb = skb;
  278. spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
  279. /* Execute interrupt handler where state index points */
  280. irq_info->handlers[irq_info->stage](fmdev);
  281. kfree_skb(skb);
  282. atomic_set(&fmdev->tx_cnt, 1);
  283. } else {
  284. fmerr("Nobody claimed SKB(%p),purging\n", skb);
  285. }
  286. /*
  287. * Check flow control field. If Num_FM_HCI_Commands field is
  288. * not zero, schedule FM TX tasklet.
  289. */
  290. if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
  291. if (!skb_queue_empty(&fmdev->tx_q))
  292. tasklet_schedule(&fmdev->tx_task);
  293. }
  294. }
  295. /* FM send tasklet: is scheduled when FM packet has to be sent to chip */
/* FM send tasklet: is scheduled when FM packet has to be sent to chip */
static void send_tasklet(struct tasklet_struct *t)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = from_tasklet(fmdev, t, tx_task);

	/* tx_cnt == 0 means a command is still in flight; wait our turn */
	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Check, is there any timeout happened to last transmitted packet */
	if (time_is_before_jiffies(fmdev->last_tx_jiffies + FM_DRV_TX_TIMEOUT)) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send queued FM TX packets */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	/* Remember the opcode so recv_tasklet can match the response */
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Write FM packet to ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		/* ST rejected the packet: drop it and re-open the TX window */
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX tasklet failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}
  329. /*
  330. * Queues FM Channel-8 packet to FM TX queue and schedules FM TX tasklet for
  331. * transmission
  332. */
/*
 * Queues FM Channel-8 packet to FM TX queue and schedules FM TX tasklet for
 * transmission
 *
 * @fm_op: Channel-8 opcode (must be below FM_INTERRUPT)
 * @type: read/write flag placed in the header's rd_wr field
 * @payload: optional command data; NOTE: for write commands outside firmware
 *           download this buffer is byte-swapped IN PLACE (see below)
 * @payload_len: payload size in bytes
 * @wait_completion: optional completion the sender will wait on
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure.
 */
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	/* During firmware download the payload is already a full packet */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size =
		    FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill FM header info for the commands which come from
	 * FM firmware file.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* 0x08 */

		/* 3 (fm_opcode,rd_wr,dlen) + payload len) */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * If firmware download has finished and the command is
		 * not a read command then payload is != NULL - a write
		 * command with u16 payload - convert to be16
		 */
		if (payload != NULL)
			*(__be16 *)payload = cpu_to_be16(*(u16 *)payload);
	} else if (payload != NULL) {
		/* Firmware packet: the opcode lives at payload byte 2 */
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		skb_put_data(skb, payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}
  391. /* Sends FM Channel-8 command to the chip and waits for the response */
/* Sends FM Channel-8 command to the chip and waits for the response */
/*
 * Synchronous wrapper around fm_send_cmd(): queues the command, blocks
 * (up to FM_DRV_TX_TIMEOUT) for recv_tasklet to complete maintask_comp,
 * then optionally copies the response payload to @response/@response_len.
 *
 * Returns 0 on success, negative errno on queueing failure, timeout
 * (-ETIMEDOUT), missing response (-EFAULT) or bad chip status (-EIO).
 * Must not be called from atomic context (it sleeps on a completion).
 */
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			  &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec),didn't get regcompletion signal from RX tasklet\n",
		      jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	/* Take ownership of the response skb stashed by recv_tasklet */
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
		      evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}
	/* Send response data to caller */
	/* Data is copied only if it fits the caller's buffer (payload_len) */
	if (response != NULL && response_len != NULL && evt_hdr->dlen &&
	    evt_hdr->dlen <= payload_len) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}
  438. /* --- Helper functions used in FM interrupt handlers ---*/
  439. static inline int check_cmdresp_status(struct fmdev *fmdev,
  440. struct sk_buff **skb)
  441. {
  442. struct fm_event_msg_hdr *fm_evt_hdr;
  443. unsigned long flags;
  444. del_timer(&fmdev->irq_info.timer);
  445. spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
  446. *skb = fmdev->resp_skb;
  447. fmdev->resp_skb = NULL;
  448. spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
  449. fm_evt_hdr = (void *)(*skb)->data;
  450. if (fm_evt_hdr->status != 0) {
  451. fmerr("irq: opcode %x response status is not zero Initiating irq recovery process\n",
  452. fm_evt_hdr->op);
  453. mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
  454. return -1;
  455. }
  456. return 0;
  457. }
/*
 * Common tail for response-handling irq stages: if the command response
 * is good, advance the irq state machine to @stage. On failure
 * check_cmdresp_status() has already armed the recovery timer. The skb is
 * not freed here; recv_tasklet frees it after the handler chain returns.
 */
static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}
  464. /*
  465. * Interrupt process timeout handler.
  466. * One of the irq handler did not get proper response from the chip. So take
  467. * recovery action here. FM interrupts are disabled in the beginning of
  468. * interrupt process. Therefore reset stage index to re-enable default
  469. * interrupts. So that next interrupt will be processed as usual.
  470. */
  471. static void int_timeout_handler(struct timer_list *t)
  472. {
  473. struct fmdev *fmdev;
  474. struct fm_irq *fmirq;
  475. fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
  476. fmdev = from_timer(fmdev, t, irq_info.timer);
  477. fmirq = &fmdev->irq_info;
  478. fmirq->retry++;
  479. if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
  480. /* Stop recovery action (interrupt reenable process) and
  481. * reset stage index & retry count values */
  482. fmirq->stage = 0;
  483. fmirq->retry = 0;
  484. fmerr("Recovery action failed duringirq processing, max retry reached\n");
  485. return;
  486. }
  487. fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
  488. }
  489. /* --------- FM interrupt handlers ------------*/
/* First irq stage: ask the chip which interrupt source(s) fired */
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	/* Send FLAG_GET command , to know the source of interrupt */
	/* On success arm the timer and wait in the response stage;
	 * on queueing failure the stage is left unchanged. */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}
/* Parse the FLAG_GET response into irq_info.flag and continue the chain */
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;
	/* Oversized payload would overflow the u16 flag field; drop it.
	 * NOTE(review): this early return leaves the irq chain stalled with
	 * no timer armed — confirm this is the intended recovery behavior. */
	if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	/* Chip sends the flag register big-endian */
	fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}
  514. static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
  515. {
  516. if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
  517. fmerr("irq: HW MAL int received - do nothing\n");
  518. /* Continue next function in interrupt handler table */
  519. fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
  520. }
  521. static void fm_irq_handle_rds_start(struct fmdev *fmdev)
  522. {
  523. if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
  524. fmdbg("irq: rds threshold reached\n");
  525. fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
  526. } else {
  527. /* Continue next function in interrupt handler table */
  528. fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
  529. }
  530. fm_irq_call(fmdev);
  531. }
static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Send the command to read RDS data from the chip */
	/* Read up to threshold * 3 bytes (RDS blocks are 3 bytes each);
	 * on success wait for the response stage under the timeout timer. */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			 (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}
  539. /* Keeps track of current RX channel AF (Alternate Frequency) */
/* Keeps track of current RX channel AF (Alternate Frequency) */
/*
 * @af: raw AF code from an RDS group 0A block. Codes in
 * [FM_RDS_1_AF_FOLLOWS, FM_RDS_25_AF_FOLLOWS] announce the list length and
 * reset the cache; codes below FM_RDS_MIN_AF or above the per-region max
 * are ignored; anything else maps to a frequency that is appended to the
 * cache if not already present.
 */
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* First AF indicates the number of AF follows. Reset the list */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	/* AF code N means bot_freq + N * 100 (same units as rx.freq) */
	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) is matching with received AF(%d)\n",
		      fmdev->rx.freq, freq);
		return;
	}
	/* Do check in AF cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}
	/* Reached the limit of the list - ignore the next AF */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * If we reached the end of the list then this AF is not
	 * in the list - add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}
  585. /*
  586. * Converts RDS buffer data from big endian format
  587. * to little endian format.
  588. */
  589. static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
  590. struct fm_rdsdata_format *rds_format)
  591. {
  592. u8 index = 0;
  593. u8 *rds_buff;
  594. /*
  595. * Since in Orca the 2 RDS Data bytes are in little endian and
  596. * in Dolphin they are in big endian, the parsing of the RDS data
  597. * is chip dependent
  598. */
  599. if (fmdev->asci_id != 0x6350) {
  600. rds_buff = &rds_format->data.groupdatabuff.buff[0];
  601. while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
  602. swap(rds_buff[index], rds_buff[index + 1]);
  603. index += 2;
  604. }
  605. }
  606. }
/*
 * Parse the RDS_DATA_GET response: first pass decodes 3-byte RDS blocks
 * into rds_fmt to extract PI code and AF entries; second pass copies the
 * raw blocks into the V4L2 ring buffer and wakes up readers.
 */
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
	u8 type, blk_idx, idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		/* Byte 2 of each block carries type and error status */
		meta_data = rds_data[2];
		/* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
		type = (meta_data & 0x07);

		/* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
		/* C (2) and C' (3) share index 2, so D lands on index 3 */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
		      (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip checkword (control) byte and copy only data byte */
		/* array_index_nospec bounds the index against speculation */
		idx = array_index_nospec(blk_idx * (FM_RDS_BLK_SIZE - 1),
					 FM_RX_RDS_INFO_FIELD_MAX - (FM_RDS_BLK_SIZE - 1));
		memcpy(&rds_fmt.data.groupdatabuff.buff[idx], rds_data,
		       FM_RDS_BLK_SIZE - 1);
		rds->last_blk_idx = blk_idx;

		/* If completed a whole group then handle it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/*
			 * Extract PI code and store in local cache.
			 * We need this during AF switch processing.
			 */
			cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			/* Top 5 bits of block B select the group type */
			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
			      (group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				/* Group 0A carries two AF codes */
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw rds data to internal rds buffer */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Fill RDS buffer as per V4L2 specification.
		 * Store control byte
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;	/* Offset name */
		tmpbuf[2] |= blk_idx << 3;	/* Received offset */

		/* Store data byte */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow & start over */
		/* Writer catching up to reader means overflow: reset both */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wakeup read queue */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}
/* Final RDS stage: chain to the tune-operation-ended stage handler. */
static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}
/*
 * Stage handler for tune-ended/band-limit interrupts: either continue an
 * in-progress AF (alternate frequency) switch by reading back the tuned
 * frequency, or wake the main task that requested the tune.
 */
static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) & fmdev->
	    irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			/* AF jump triggered this tune: verify the new freq */
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			/* Normal tune/seek issued by the main task */
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}
/*
 * Stage handler for the power enable/disable interrupt: wake the main
 * task that issued FM_POWER_MODE, then move on to the low-RSSI stage.
 */
static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}
/*
 * Stage handler for the low-RSSI interrupt. If AF switching is enabled,
 * a valid frequency is tuned and the AF cache is non-empty, start the AF
 * jump sequence; otherwise fall through to re-arming the interrupt mask.
 */
static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low RSSI interrupts */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		/* Start from the first AF cache entry; remember origin freq */
		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}
/*
 * AF jump step 1: program the cached PI code so the tuner will only
 * accept an alternate frequency carrying the same station identifier.
 */
static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set PI code - must be updated if the AF list is not empty */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}
/* PI-set response: advance to programming the PI mask. */
static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}
/*
 * AF jump step 2: set the PI comparison mask.
 * 0xFFFF = Enable PI code matching
 * 0x0000 = Disable PI code matching
 */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	/* PI matching is intentionally disabled here */
	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}
/* PI-mask response: advance to programming the AF frequency. */
static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}
/*
 * AF jump step 3: program the alternate frequency to try, expressed as a
 * channel index relative to the bottom of the current band.
 */
static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	/* Convert the cached absolute frequency to a band-relative index */
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
	      fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}
/* AF-frequency-set response: advance to enabling the tune interrupt. */
static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}
/* AF jump step 4: unmask only the tune-ended (FR) interrupt. */
static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable FR (tuning operation ended) interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}
/* Interrupt-enable response: advance to starting the AF jump itself. */
static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}
/* AF jump step 5: put the tuner into AF-jump mode to perform the switch. */
static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}
/*
 * AF jump started: rewind the state machine to the flag-read stage so the
 * resulting tune-ended interrupt is handled from the top, mark the AF
 * switch as in progress and release the interrupt task.
 */
static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
/* After an AF jump, request the currently tuned frequency from the chip. */
static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	/* payload is only used for its size: FREQ_SET is read (REG_RD) here */
	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}
/*
 * Parse the frequency read back after an AF jump. If the tuner moved to
 * the expected alternate frequency the jump succeeded; otherwise advance
 * to the next AF cache entry, or give up when the cache is exhausted.
 */
static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu((__force __be16)read_freq);
	/* Chip reports a band-relative channel index; convert to KHz */
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the frequency was changed the jump succeeded */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is on, enable low level RSSI interrupt */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {		/* jump to the next freq in the AF list */
		fmdev->rx.afjump_idx++;

		/* If we reached the end of the list - stop searching */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {	/* AF List is not over - try next one */
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}
/* End of low-RSSI/AF processing: chain to re-arming the interrupt mask. */
static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}
/* Write the accumulated interrupt mask back to the chip. */
static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}
/*
 * Response to the interrupt-mask write. This is the final stage of the
 * interrupt state machine: restart from stage 0 and either service a
 * pending interrupt immediately or release the interrupt task.
 */
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;
	/*
	 * This is last function in interrupt table to be executed.
	 * So, reset stage index to 0.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start processing any pending interrupt */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
/*
 * Returns availability of RDS data in internal buffer.
 * Poll helper: 0 when the RDS ring buffer is non-empty, -EAGAIN otherwise.
 */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
				struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}
  899. /* Copies RDS data from internal buffer to user buffer */
  900. int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
  901. u8 __user *buf, size_t count)
  902. {
  903. u32 block_count;
  904. u8 tmpbuf[FM_RDS_BLK_SIZE];
  905. unsigned long flags;
  906. int ret;
  907. if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
  908. if (file->f_flags & O_NONBLOCK)
  909. return -EWOULDBLOCK;
  910. ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
  911. (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
  912. if (ret)
  913. return -EINTR;
  914. }
  915. /* Calculate block count from byte count */
  916. count /= FM_RDS_BLK_SIZE;
  917. block_count = 0;
  918. ret = 0;
  919. while (block_count < count) {
  920. spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
  921. if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
  922. spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
  923. break;
  924. }
  925. memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
  926. FM_RDS_BLK_SIZE);
  927. fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
  928. if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
  929. fmdev->rx.rds.rd_idx = 0;
  930. spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
  931. if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
  932. break;
  933. block_count++;
  934. buf += FM_RDS_BLK_SIZE;
  935. ret += FM_RDS_BLK_SIZE;
  936. }
  937. return ret;
  938. }
  939. int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
  940. {
  941. switch (fmdev->curr_fmmode) {
  942. case FM_MODE_RX:
  943. return fm_rx_set_freq(fmdev, freq_to_set);
  944. case FM_MODE_TX:
  945. return fm_tx_set_freq(fmdev, freq_to_set);
  946. default:
  947. return -EINVAL;
  948. }
  949. }
  950. int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
  951. {
  952. if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
  953. fmerr("RX frequency is not set\n");
  954. return -EPERM;
  955. }
  956. if (cur_tuned_frq == NULL) {
  957. fmerr("Invalid memory\n");
  958. return -ENOMEM;
  959. }
  960. switch (fmdev->curr_fmmode) {
  961. case FM_MODE_RX:
  962. *cur_tuned_frq = fmdev->rx.freq;
  963. return 0;
  964. case FM_MODE_TX:
  965. *cur_tuned_frq = 0; /* TODO : Change this later */
  966. return 0;
  967. default:
  968. return -EINVAL;
  969. }
  970. }
  971. int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
  972. {
  973. switch (fmdev->curr_fmmode) {
  974. case FM_MODE_RX:
  975. return fm_rx_set_region(fmdev, region_to_set);
  976. case FM_MODE_TX:
  977. return fm_tx_set_region(fmdev, region_to_set);
  978. default:
  979. return -EINVAL;
  980. }
  981. }
  982. int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
  983. {
  984. switch (fmdev->curr_fmmode) {
  985. case FM_MODE_RX:
  986. return fm_rx_set_mute_mode(fmdev, mute_mode_toset);
  987. case FM_MODE_TX:
  988. return fm_tx_set_mute_mode(fmdev, mute_mode_toset);
  989. default:
  990. return -EINVAL;
  991. }
  992. }
  993. int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
  994. {
  995. switch (fmdev->curr_fmmode) {
  996. case FM_MODE_RX:
  997. return fm_rx_set_stereo_mono(fmdev, mode);
  998. case FM_MODE_TX:
  999. return fm_tx_set_stereo_mono(fmdev, mode);
  1000. default:
  1001. return -EINVAL;
  1002. }
  1003. }
  1004. int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
  1005. {
  1006. switch (fmdev->curr_fmmode) {
  1007. case FM_MODE_RX:
  1008. return fm_rx_set_rds_mode(fmdev, rds_en_dis);
  1009. case FM_MODE_TX:
  1010. return fm_tx_set_rds_mode(fmdev, rds_en_dis);
  1011. default:
  1012. return -EINVAL;
  1013. }
  1014. }
/*
 * Sends power off command to the chip and, on success, tears down the
 * shared-transport registration via fmc_release().
 */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	/* 0 == power off */
	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
		sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}
  1035. /* Reads init command from FM firmware file and loads to the chip */
  1036. static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
  1037. {
  1038. const struct firmware *fw_entry;
  1039. struct bts_header *fw_header;
  1040. struct bts_action *action;
  1041. struct bts_action_delay *delay;
  1042. u8 *fw_data;
  1043. int ret, fw_len, cmd_cnt;
  1044. cmd_cnt = 0;
  1045. set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
  1046. ret = request_firmware(&fw_entry, fw_name,
  1047. &fmdev->radio_dev->dev);
  1048. if (ret < 0) {
  1049. fmerr("Unable to read firmware(%s) content\n", fw_name);
  1050. return ret;
  1051. }
  1052. fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);
  1053. fw_data = (void *)fw_entry->data;
  1054. fw_len = fw_entry->size;
  1055. fw_header = (struct bts_header *)fw_data;
  1056. if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
  1057. fmerr("%s not a legal TI firmware file\n", fw_name);
  1058. ret = -EINVAL;
  1059. goto rel_fw;
  1060. }
  1061. fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);
  1062. /* Skip file header info , we already verified it */
  1063. fw_data += sizeof(struct bts_header);
  1064. fw_len -= sizeof(struct bts_header);
  1065. while (fw_data && fw_len > 0) {
  1066. action = (struct bts_action *)fw_data;
  1067. switch (action->type) {
  1068. case ACTION_SEND_COMMAND: /* Send */
  1069. ret = fmc_send_cmd(fmdev, 0, 0, action->data,
  1070. action->size, NULL, NULL);
  1071. if (ret)
  1072. goto rel_fw;
  1073. cmd_cnt++;
  1074. break;
  1075. case ACTION_DELAY: /* Delay */
  1076. delay = (struct bts_action_delay *)action->data;
  1077. mdelay(delay->msec);
  1078. break;
  1079. }
  1080. fw_data += (sizeof(struct bts_action) + (action->size));
  1081. fw_len -= (sizeof(struct bts_action) + (action->size));
  1082. }
  1083. fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
  1084. rel_fw:
  1085. release_firmware(fw_entry);
  1086. clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
  1087. return ret;
  1088. }
  1089. /* Loads default RX configuration to the chip */
  1090. static int load_default_rx_configuration(struct fmdev *fmdev)
  1091. {
  1092. int ret;
  1093. ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
  1094. if (ret < 0)
  1095. return ret;
  1096. return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
  1097. }
  1098. /* Does FM power on sequence */
  1099. static int fm_power_up(struct fmdev *fmdev, u8 mode)
  1100. {
  1101. u16 payload;
  1102. __be16 asic_id = 0, asic_ver = 0;
  1103. int resp_len, ret;
  1104. u8 fw_name[50];
  1105. if (mode >= FM_MODE_ENTRY_MAX) {
  1106. fmerr("Invalid firmware download option\n");
  1107. return -EINVAL;
  1108. }
  1109. /*
  1110. * Initialize FM common module. FM GPIO toggling is
  1111. * taken care in Shared Transport driver.
  1112. */
  1113. ret = fmc_prepare(fmdev);
  1114. if (ret < 0) {
  1115. fmerr("Unable to prepare FM Common\n");
  1116. return ret;
  1117. }
  1118. payload = FM_ENABLE;
  1119. if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
  1120. sizeof(payload), NULL, NULL))
  1121. goto rel;
  1122. /* Allow the chip to settle down in Channel-8 mode */
  1123. msleep(20);
  1124. if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
  1125. sizeof(asic_id), &asic_id, &resp_len))
  1126. goto rel;
  1127. if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
  1128. sizeof(asic_ver), &asic_ver, &resp_len))
  1129. goto rel;
  1130. fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
  1131. be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
  1132. sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
  1133. be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
  1134. ret = fm_download_firmware(fmdev, fw_name);
  1135. if (ret < 0) {
  1136. fmdbg("Failed to download firmware file %s\n", fw_name);
  1137. goto rel;
  1138. }
  1139. sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
  1140. FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
  1141. be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
  1142. ret = fm_download_firmware(fmdev, fw_name);
  1143. if (ret < 0) {
  1144. fmdbg("Failed to download firmware file %s\n", fw_name);
  1145. goto rel;
  1146. } else
  1147. return ret;
  1148. rel:
  1149. return fmc_release(fmdev);
  1150. }
/*
 * Set FM Modes(TX, RX, OFF).
 * Switching between TX and RX always goes through a power-down first,
 * since each mode requires its own firmware to be loaded by fm_power_up().
 */
int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("Already fm is in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:	/* OFF Mode */
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:	/* TX Mode */
	case FM_MODE_RX:	/* RX Mode */
		/* Power down before switching to TX or RX mode */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			/* Give the chip time to settle after power-down */
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}
  1198. /* Returns current FM mode (TX, RX, OFF) */
  1199. int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
  1200. {
  1201. if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
  1202. fmerr("FM core is not ready\n");
  1203. return -EPERM;
  1204. }
  1205. if (fmmode == NULL) {
  1206. fmerr("Invalid memory\n");
  1207. return -ENOMEM;
  1208. }
  1209. *fmmode = fmdev->curr_fmmode;
  1210. return 0;
  1211. }
/*
 * Called by ST layer when FM packet is available.
 * Validates that the skb belongs to FM logical channel 8, re-prepends the
 * channel byte (stashed by ST in cb[0]) and defers processing to the RX
 * tasklet.
 */
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}

	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	/* Restore the channel byte in front of the payload for the tasklet */
	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}
  1230. /*
  1231. * Called by ST layer to indicate protocol registration completion
  1232. * status.
  1233. */
  1234. static void fm_st_reg_comp_cb(void *arg, int data)
  1235. {
  1236. struct fmdev *fmdev;
  1237. fmdev = (struct fmdev *)arg;
  1238. fmdev->streg_cbdata = data;
  1239. complete(&wait_for_fmdrv_reg_comp);
  1240. }
/*
 * This function will be called from FM V4L2 open function.
 * Register with ST driver (FM is logical channel 8) and initialize all
 * driver state: locks, tasklets, timer, interrupt mask and RX defaults.
 */
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	/* Describe the FM protocol to the TI shared transport */
	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		/* Registration completes asynchronously; wait for the CB */
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
			FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
					jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error status %d\n",
					fmdev->streg_cbdata);
			return -EAGAIN;
		}
		ret = 0;
	} else if (ret < 0) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	/* ST fills in the low-level write hook on successful registration */
	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_setup(&fmdev->tx_task, send_tasklet);

	/* Initialize RX Queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_setup(&fmdev->rx_task, recv_tasklet);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	/* Command-response timeout guard for the IRQ state machine */
	timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
	/*TODO: add FM_STIC_EVENT later */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	fmdev->rx.region = region_configs[default_radio_region];

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}
/*
 * This function will be called from FM V4L2 release function.
 * Unregister from ST driver and quiesce all queues/tasklets.
 */
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Service pending read */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	/* Only the channel id is needed for de-registration */
	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;
	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}
/*
 * Module init function. Ask FM V4L module to register video device.
 * Allocate memory for FM driver context and RX RDS buffer.
 */
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	/* RDS ring buffer holds a whole number of V4L2 RDS blocks */
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	/* Chip is powered up lazily on first mode switch */
	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}
  1388. /* Module exit function. Ask FM V4L module to unregister video device */
  1389. static void __exit fm_drv_exit(void)
  1390. {
  1391. struct fmdev *fmdev = NULL;
  1392. fmdev = fm_v4l2_deinit_video_device();
  1393. if (fmdev != NULL) {
  1394. kfree(fmdev->rx.rds.buff);
  1395. kfree(fmdev);
  1396. }
  1397. }
  1398. module_init(fm_drv_init);
  1399. module_exit(fm_drv_exit);
  1400. /* ------------- Module Info ------------- */
  1401. MODULE_AUTHOR("Manjunatha Halli <[email protected]>");
  1402. MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
  1403. MODULE_VERSION(FM_DRV_VERSION);
  1404. MODULE_LICENSE("GPL");