wcd_cpe_services.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725
  1. /* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <sound/cpe_cmi.h>
#include <sound/soc.h>
#include <linux/mfd/wcd9335/registers.h>
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
/* Size (bytes) of the shared message buffer requested from the CPE core */
#define CPE_MSG_BUFFER_SIZE 132
#define CPE_NO_SERVICE 0
/* CMI protocol version placed into every outgoing CMI header */
#define CMI_DRIVER_SUPPORTED_VERSION 0
#define CMI_API_SUCCESS 0
/* CMI opcode used to hand the CPE a descriptor of an out-of-band message */
#define CMI_MSG_TRANSPORT (0x0002)
#define CPE_SVC_INACTIVE_STATE_RETRIES_MAX 10

/* TomTom codec: CPE memory map and mailbox geometry */
#define TOMTOM_A_SVASS_SPE_DRAM_OFFSET 0x50000
#define TOMTOM_A_SVASS_SPE_DRAM_SIZE 0x30000
#define TOMTOM_A_SVASS_SPE_IRAM_OFFSET 0x80000
#define TOMTOM_A_SVASS_SPE_IRAM_SIZE 0xC000
#define TOMTOM_A_SVASS_SPE_INBOX_SIZE 12
#define TOMTOM_A_SVASS_SPE_OUTBOX_SIZE 12

/* Memory access window selection values (none / IRAM / DRAM) */
#define MEM_ACCESS_NONE_VAL 0x0
#define MEM_ACCESS_IRAM_VAL 0x1
#define MEM_ACCESS_DRAM_VAL 0x2
/* Listen-control ownership: SPE (CPE) vs MSM (host) */
#define LISTEN_CTL_SPE_VAL 0x0
#define LISTEN_CTL_MSM_VAL 0x1

/* WCD9335 codec: CPE memory map and mailbox geometry */
#define WCD9335_CPE_SS_SPE_DRAM_OFFSET 0x48000
#define WCD9335_CPE_SS_SPE_DRAM_SIZE 0x34000
#define WCD9335_CPE_SS_SPE_IRAM_OFFSET 0x80000
#define WCD9335_CPE_SS_SPE_IRAM_SIZE 0x20000
#define WCD9335_CPE_SS_SPE_INBOX_SIZE 16
#define WCD9335_CPE_SS_SPE_OUTBOX_SIZE 16
#define WCD9335_CPE_SS_SPE_MEM_BANK_SIZ 16

/* Register helpers: Nth inbox/outbox/mem-bank register of the WCD9335 */
#define WCD9335_CPE_SS_SPE_INBOX1(N) (WCD9335_CPE_SS_INBOX1_0 + (N))
#define WCD9335_CPE_SS_SPE_OUTBOX1(N) (WCD9335_CPE_SS_OUTBOX1_0 + (N))
#define WCD9335_CPE_SS_MEM_BANK(N) (WCD9335_CPE_SS_MEM_BANK_0 + (N))

#define CHUNK_SIZE 16
  53. #define CPE_SVC_GRAB_LOCK(lock, name) \
  54. { \
  55. pr_debug("%s: %s lock acquire\n", \
  56. __func__, name); \
  57. mutex_lock(lock); \
  58. }
  59. #define CPE_SVC_REL_LOCK(lock, name) \
  60. { \
  61. pr_debug("%s: %s lock release\n", \
  62. __func__, name); \
  63. mutex_unlock(lock); \
  64. }
/*
 * Per-codec hardware descriptions returned via tgt_get_cpe_info().
 * Positional order (per the initializer values used here):
 * DRAM size, DRAM offset, IRAM size, IRAM offset, inbox size,
 * outbox size -- TODO confirm against struct cpe_svc_hw_cfg in the
 * header, which is not visible in this file.
 */
static const struct cpe_svc_hw_cfg cpe_svc_tomtom_info = {
	TOMTOM_A_SVASS_SPE_DRAM_SIZE,
	TOMTOM_A_SVASS_SPE_DRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_IRAM_SIZE,
	TOMTOM_A_SVASS_SPE_IRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_INBOX_SIZE,
	TOMTOM_A_SVASS_SPE_OUTBOX_SIZE
};

static const struct cpe_svc_hw_cfg cpe_svc_wcd9335_info = {
	WCD9335_CPE_SS_SPE_DRAM_SIZE,
	WCD9335_CPE_SS_SPE_DRAM_OFFSET,
	WCD9335_CPE_SS_SPE_IRAM_SIZE,
	WCD9335_CPE_SS_SPE_IRAM_OFFSET,
	WCD9335_CPE_SS_SPE_INBOX_SIZE,
	WCD9335_CPE_SS_SPE_OUTBOX_SIZE
};
/* Top-level states of the CPE service state machine (set via cpe_change_state) */
enum cpe_state {
	CPE_STATE_UNINITIALIZED = 0,
	CPE_STATE_INITIALIZED,
	CPE_STATE_IDLE,
	CPE_STATE_DOWNLOADING,	/* firmware image download in progress */
	CPE_STATE_BOOTING,
	CPE_STATE_SENDING_MSG,	/* a CMI message send is in flight */
	CPE_STATE_OFFLINE,
	CPE_STATE_BUFFERING,
	CPE_STATE_BUFFERING_CANCELLED
};
/* Finer-grained sub-state accompanying enum cpe_state */
enum cpe_substate {
	CPE_SS_IDLE = 0,
	CPE_SS_MSG_REQUEST_ACCESS,	/* waiting for DRAM access grant */
	CPE_SS_MSG_SEND_INBOX,
	CPE_SS_MSG_SENT,
	CPE_SS_DL_DOWNLOADING,
	CPE_SS_DL_COMPLETED,
	CPE_SS_BOOT,
	CPE_SS_BOOT_INIT,
	CPE_SS_ONLINE
};
/* Commands queued to the worker thread through cpe_send_cmd_to_thread() */
enum cpe_command {
	CPE_CMD_KILL_THREAD = 0,
	CPE_CMD_BOOT,
	CPE_CMD_BOOT_INITIALIZE,
	CPE_CMD_BOOT_COMPLETE,
	CPE_CMD_SEND_MSG,
	CPE_CMD_SEND_TRANS_MSG,
	CPE_CMD_SEND_MSG_COMPLETE,
	CPE_CMD_PROCESS_IRQ,
	CPE_CMD_RAMDUMP,
	CPE_CMD_DL_SEGMENT,	/* download one firmware memory segment */
	CPE_CMD_SHUTDOWN,
	CPE_CMD_RESET,
	CPE_CMD_DEINITIALIZE,
	CPE_CMD_READ,
	CPE_CMD_ENABLE_LAB,	/* LAB = look-ahead buffering path */
	CPE_CMD_DISABLE_LAB,
	CPE_CMD_SWAP_BUFFER,
	CPE_LAB_CFG_SB,
	CPE_CMD_CANCEL_MEMACCESS,
	CPE_CMD_PROC_INCOMING_MSG,
	CPE_CMD_FTM_TEST,
};
/*
 * Result of processing one queued command; drives node disposal in
 * cpe_cmd_received() (SUCCESS/FAILED free the node, QUEUED re-queues it,
 * KILLED stops the drain loop).
 */
enum cpe_process_result {
	CPE_PROC_SUCCESS = 0,
	CPE_PROC_FAILED,
	CPE_PROC_KILLED,
	CPE_PROC_QUEUED,
};
/* One queued command; linked into cpe_info.main_queue via 'list' */
struct cpe_command_node {
	enum cpe_command command;
	enum cpe_svc_result result;
	void *data;	/* command payload; freed by cpe_command_cleanup() */
	struct list_head list;
};
/* Per-instance state of the CPE service, including the worker thread */
struct cpe_info {
	struct list_head main_queue;		/* pending cpe_command_node's */
	struct completion cmd_complete;		/* wakes the worker thread */
	struct completion thread_comp;		/* signaled on worker exit */
	void *thread_handler;			/* task from kthread_run() */
	bool stop_thread;			/* set under msg_lock to stop worker */
	struct mutex msg_lock;			/* protects main_queue/stop_thread */
	enum cpe_state state;
	enum cpe_substate substate;
	struct list_head client_list;		/* cpe_notif_node clients */
	/* command dispatcher invoked by the worker for each node */
	enum cpe_process_result (*cpe_process_command)
			(struct cpe_command_node *command_node);
	/* validates a command against the current state */
	enum cpe_svc_result (*cpe_cmd_validate)
				(const struct cpe_info *i,
				 enum cpe_command command);
	enum cpe_svc_result (*cpe_start_notification)
			     (struct cpe_info *i);
	u32 initialized;
	struct cpe_svc_tgt_abstraction *tgt;	/* codec-specific ops table */
	void *pending;
	void *data;
	void *client_context;
	u32 codec_id;
	struct work_struct clk_plan_work;
	struct completion core_svc_cmd_compl;
};
/* Codec-specific "wait for idle" instruction byte sequence */
struct cpe_tgt_waiti_info {
	u8 tgt_waiti_size;
	u8 *tgt_waiti_data;
};
/*
 * Codec-specific operations table; populated by cpe_tgt_tomtom_init()
 * or cpe_tgt_wcd9335_init() depending on the codec id.
 */
struct cpe_svc_tgt_abstraction {
	enum cpe_svc_result (*tgt_boot)(int debug_mode);
	u32 (*tgt_cpar_init_done)(void);
	u32 (*tgt_is_active)(void);
	enum cpe_svc_result (*tgt_reset)(void);
	enum cpe_svc_result (*tgt_stop)(void);
	/* mailbox register window I/O (inbox/outbox buffers below) */
	enum cpe_svc_result (*tgt_read_mailbox)
				(u8 *buffer, size_t size);
	enum cpe_svc_result (*tgt_write_mailbox)
				(u8 *buffer, size_t size);
	/* CPE IRAM/DRAM segment I/O */
	enum cpe_svc_result (*tgt_read_ram)
				(struct cpe_info *c,
				 struct cpe_svc_mem_segment *data);
	enum cpe_svc_result (*tgt_write_ram)
				(struct cpe_info *c,
				 const struct cpe_svc_mem_segment *data);
	enum cpe_svc_result (*tgt_route_notification)
				(enum cpe_svc_module module,
				 enum cpe_svc_route_dest dest);
	enum cpe_svc_result (*tgt_set_debug_mode)(u32 enable);
	const struct cpe_svc_hw_cfg *(*tgt_get_cpe_info)(void);
	enum cpe_svc_result (*tgt_deinit)
				(struct cpe_svc_tgt_abstraction *param);
	enum cpe_svc_result (*tgt_voice_tx_lab)
				(bool);
	u8 *inbox;	/* staging buffer for outgoing mailbox messages */
	u8 *outbox;	/* staging buffer for incoming mailbox messages */
	struct cpe_tgt_waiti_info *tgt_waiti_info;
};
  197. static enum cpe_svc_result cpe_tgt_tomtom_init(
  198. struct cpe_svc_codec_info_v1 *codec_info,
  199. struct cpe_svc_tgt_abstraction *param);
  200. static enum cpe_svc_result cpe_tgt_wcd9335_init(
  201. struct cpe_svc_codec_info_v1 *codec_info,
  202. struct cpe_svc_tgt_abstraction *param);
/*
 * A message to deliver to the CPE. When 'address' is non-zero the
 * payload is written to CPE RAM and announced via CMI_MSG_TRANSPORT;
 * otherwise it is sent in-band through the inbox.
 */
struct cpe_send_msg {
	u8 *payload;
	u32 isobm;	/* non-zero: payload carries an out-of-band descriptor */
	u32 address;	/* CPE-side destination address, 0 for in-band */
	size_t size;
};
/* Handle for a CPE read (buffering) session */
struct cpe_read_handle {
	void *registration;
	struct cpe_info t_info;
	struct list_head buffers;
	void *config;
};
/* Pair of client callbacks: generic service events and raw CMI messages */
struct generic_notification {
	void (*notification)
		(const struct cpe_svc_notification *parameter);
	void (*cmi_notification)
		(const struct cmi_api_notification *parameter);
};
/* One registered notification client; linked into cpe_info.client_list */
struct cpe_notif_node {
	struct generic_notification notif;
	u32 mask;	/* bitmask of events the client wants */
	u32 service;	/* CMI service id the client handles */
	const struct cpe_info *context;
	const char *name;
	u32 disabled;	/* non-zero suppresses the generic callback */
	struct list_head list;
};
/* Driver-wide singleton state (instantiated as 'cpe_d' below) */
struct cpe_priv {
	struct cpe_info *cpe_default_handle;	/* fallback when no handle given */
	void (*cpe_irq_control_callback)(u32 enable);
	void (*cpe_query_freq_plans_cb)
		(void *cdc_priv,
		 struct cpe_svc_cfg_clk_plan *clk_freq);
	void (*cpe_change_freq_plan_cb)(void *cdc_priv,
			u32 clk_freq);
	u32 cpe_msg_buffer;
	void *cpe_cmi_handle;
	struct mutex cpe_api_mutex;	/* serializes public API entry points */
	struct mutex cpe_svc_lock;	/* protects client_list traversal */
	struct cpe_svc_boot_event cpe_debug_vector;
	void *cdc_priv;			/* codec context (snd_soc_codec *) */
};
  245. static struct cpe_priv cpe_d;
  246. static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle);
  247. static enum cpe_svc_result cpe_is_command_valid(
  248. const struct cpe_info *t_info,
  249. enum cpe_command command);
  250. static int cpe_register_read(u32 reg, u8 *val)
  251. {
  252. *(val) = snd_soc_read(cpe_d.cdc_priv, reg);
  253. return 0;
  254. }
  255. static enum cpe_svc_result cpe_update_bits(u32 reg,
  256. u32 mask, u32 value)
  257. {
  258. int ret = 0;
  259. ret = snd_soc_update_bits(cpe_d.cdc_priv, reg,
  260. mask, value);
  261. if (ret < 0)
  262. return CPE_SVC_FAILED;
  263. return CPE_SVC_SUCCESS;
  264. }
  265. static int cpe_register_write(u32 reg, u32 val)
  266. {
  267. int ret = 0;
  268. if (reg != WCD9335_CPE_SS_MEM_BANK_0)
  269. pr_debug("%s: reg = 0x%x, value = 0x%x\n",
  270. __func__, reg, val);
  271. ret = snd_soc_write(cpe_d.cdc_priv, reg, val);
  272. if (ret < 0)
  273. return CPE_SVC_FAILED;
  274. return CPE_SVC_SUCCESS;
  275. }
  276. static int cpe_register_write_repeat(u32 reg, u8 *ptr, u32 to_write)
  277. {
  278. struct snd_soc_codec *codec = cpe_d.cdc_priv;
  279. struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
  280. int ret = 0;
  281. ret = wcd9xxx_slim_write_repeat(wcd9xxx, reg, to_write, ptr);
  282. if (ret != 0)
  283. pr_err("%s: slim_write_repeat failed\n", __func__);
  284. if (ret < 0)
  285. return CPE_SVC_FAILED;
  286. return CPE_SVC_SUCCESS;
  287. }
  288. static bool cpe_register_read_autoinc_supported(void)
  289. {
  290. return true;
  291. }
/* Called under msgq locked context */
/*
 * Drain the command queue of @t_info, dispatching each node to
 * cpe_process_command(). Stops early once a command does not return
 * CPE_PROC_SUCCESS. Caller must hold t_info->msg_lock.
 */
static void cpe_cmd_received(struct cpe_info *t_info)
{
	struct cpe_command_node *node = NULL;
	enum cpe_process_result proc_rc = CPE_PROC_SUCCESS;

	if (!t_info) {
		pr_err("%s: Invalid thread info\n",
			__func__);
		return;
	}

	while (!list_empty(&t_info->main_queue)) {
		/* a prior failure/kill/queue result ends the drain */
		if (proc_rc != CPE_PROC_SUCCESS)
			break;
		node = list_first_entry(&t_info->main_queue,
					struct cpe_command_node, list);
		if (!node)
			break;
		list_del(&node->list);
		proc_rc = t_info->cpe_process_command(node);
		pr_debug("%s: process command return %d\n",
			 __func__, proc_rc);

		switch (proc_rc) {
		case CPE_PROC_SUCCESS:
			kfree(node);
			break;
		case CPE_PROC_FAILED:
			kfree(node);
			pr_err("%s: cmd failed\n", __func__);
			break;
		case CPE_PROC_KILLED:
			/*
			 * NOTE(review): node is not freed on this path --
			 * presumably ownership passed to the command handler;
			 * confirm against cpe_process_command implementations.
			 */
			break;
		default:
			/* CPE_PROC_QUEUED: put back at the head for later */
			list_add(&node->list, &(t_info->main_queue));
		}
	}
}
/*
 * Worker thread body: sleep on cmd_complete, then drain the command
 * queue under msg_lock. Exits when stop_thread is observed (set by
 * cpe_cleanup_worker_thread() under the same lock), signaling
 * thread_comp on the way out.
 */
static int cpe_worker_thread(void *context)
{
	struct cpe_info *t_info = (struct cpe_info *)context;

	/*
	 * Thread will run until requested to stop explicitly
	 * by setting the t_info->stop_thread flag
	 */
	while (1) {
		/* Wait for command to be processed */
		wait_for_completion(&t_info->cmd_complete);

		CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
		cpe_cmd_received(t_info);
		reinit_completion(&t_info->cmd_complete);
		/* Check if thread needs to be stopped */
		if (t_info->stop_thread)
			goto unlock_and_exit;	/* still holding msg_lock */
		CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	};

unlock_and_exit:
	pr_debug("%s: thread stopped\n", __func__);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	complete_and_exit(&t_info->thread_comp, 0);
}
  351. static void cpe_create_worker_thread(struct cpe_info *t_info)
  352. {
  353. INIT_LIST_HEAD(&t_info->main_queue);
  354. init_completion(&t_info->cmd_complete);
  355. init_completion(&t_info->thread_comp);
  356. t_info->stop_thread = false;
  357. t_info->thread_handler = kthread_run(cpe_worker_thread,
  358. (void *)t_info, "cpe-worker-thread");
  359. pr_debug("%s: Created new worker thread\n",
  360. __func__);
  361. }
/*
 * Stop and reap the worker thread: set stop_thread under msg_lock,
 * wake the worker via cmd_complete, then wait for it to signal
 * thread_comp before clearing the handle.
 */
static void cpe_cleanup_worker_thread(struct cpe_info *t_info)
{
	if (!t_info->thread_handler) {
		pr_err("%s: thread not created\n", __func__);
		return;
	}

	/*
	 * Wake up the command handler in case
	 * it is waiting for an command to be processed.
	 */
	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
	t_info->stop_thread = true;
	complete(&t_info->cmd_complete);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");

	/* Wait for the thread to exit */
	wait_for_completion(&t_info->thread_comp);
	t_info->thread_handler = NULL;
	pr_debug("%s: Thread cleaned up successfully\n",
		__func__);
}
  382. static enum cpe_svc_result
  383. cpe_send_cmd_to_thread(struct cpe_info *t_info,
  384. enum cpe_command command, void *data,
  385. bool high_prio)
  386. {
  387. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  388. struct cpe_command_node *cmd = NULL;
  389. rc = cpe_is_command_valid(t_info, command);
  390. if (rc != CPE_SVC_SUCCESS) {
  391. pr_err("%s: Invalid command %d\n",
  392. __func__, command);
  393. return rc;
  394. }
  395. cmd = kzalloc(sizeof(struct cpe_command_node),
  396. GFP_ATOMIC);
  397. if (!cmd)
  398. return CPE_SVC_NO_MEMORY;
  399. cmd->command = command;
  400. cmd->data = data;
  401. CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
  402. if (high_prio)
  403. list_add(&(cmd->list),
  404. &(t_info->main_queue));
  405. else
  406. list_add_tail(&(cmd->list),
  407. &(t_info->main_queue));
  408. complete(&t_info->cmd_complete);
  409. CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
  410. return rc;
  411. }
  412. static enum cpe_svc_result cpe_change_state(
  413. struct cpe_info *t_info,
  414. enum cpe_state state, enum cpe_substate ss)
  415. {
  416. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  417. if (!t_info)
  418. t_info = cpe_d.cpe_default_handle;
  419. t_info->state = state;
  420. t_info->substate = ss;
  421. pr_debug("%s: current state: %d,%d, new_state: %d,%d\n",
  422. __func__, t_info->state, t_info->substate,
  423. state, ss);
  424. return rc;
  425. }
  426. static enum cpe_svc_result
  427. cpe_is_command_valid(const struct cpe_info *t_info,
  428. enum cpe_command command)
  429. {
  430. enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
  431. if (t_info && t_info->cpe_cmd_validate)
  432. rc = t_info->cpe_cmd_validate(t_info, command);
  433. else
  434. pr_err("%s: invalid handle or callback\n",
  435. __func__);
  436. return rc;
  437. }
  438. static void cpe_notify_client(struct cpe_notif_node *client,
  439. struct cpe_svc_notification *payload)
  440. {
  441. if (!client || !payload) {
  442. pr_err("%s: invalid client or payload\n",
  443. __func__);
  444. return;
  445. }
  446. if (!(client->mask & payload->event)) {
  447. pr_debug("%s: client mask 0x%x not registered for event 0x%x\n",
  448. __func__, client->mask, payload->event);
  449. return;
  450. }
  451. if (client->notif.notification && !client->disabled)
  452. client->notif.notification(payload);
  453. if ((client->mask & CPE_SVC_CMI_MSG) &&
  454. client->notif.cmi_notification)
  455. client->notif.cmi_notification(
  456. (const struct cmi_api_notification *)payload);
  457. }
  458. static void cpe_broadcast_notification(const struct cpe_info *t_info,
  459. struct cpe_svc_notification *payload)
  460. {
  461. struct cpe_notif_node *n = NULL;
  462. if (!t_info || !payload) {
  463. pr_err("%s: invalid handle\n", __func__);
  464. return;
  465. }
  466. pr_debug("%s: notify clients, event = %d\n",
  467. __func__, payload->event);
  468. payload->private_data = cpe_d.cdc_priv;
  469. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  470. list_for_each_entry(n, &t_info->client_list, list) {
  471. if (!(n->mask & CPE_SVC_CMI_MSG))
  472. cpe_notify_client(n, payload);
  473. }
  474. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  475. }
  476. static void *cpe_register_generic(struct cpe_info *t_info,
  477. void notification_callback(
  478. const struct cpe_svc_notification *parameter),
  479. void cmi_callback(
  480. const struct cmi_api_notification *parameter),
  481. u32 mask, u32 service, const char *name)
  482. {
  483. struct cpe_notif_node *n = NULL;
  484. n = kzalloc(sizeof(struct cpe_notif_node),
  485. GFP_KERNEL);
  486. if (!n)
  487. return NULL;
  488. n->mask = mask;
  489. n->service = service;
  490. n->notif.notification = notification_callback;
  491. n->notif.cmi_notification = cmi_callback;
  492. n->context = t_info;
  493. n->disabled = false;
  494. n->name = name;
  495. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  496. /* Make sure CPE core service is first */
  497. if (service == CMI_CPE_CORE_SERVICE_ID)
  498. list_add(&n->list, &t_info->client_list);
  499. else
  500. list_add_tail(&n->list, &t_info->client_list);
  501. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  502. return n;
  503. }
/*
 * Unlink and free a client previously returned by cpe_register_generic().
 *
 * NOTE(review): list_del() here runs without cpe_svc_lock, while
 * insertion (cpe_register_generic) and traversal (broadcast/notify)
 * hold it -- looks racy, but taking the lock here could deadlock if a
 * client deregisters from inside its own notification callback (those
 * run under the lock). Confirm the intended locking contract with the
 * public API callers before changing.
 */
static enum cpe_svc_result cpe_deregister_generic(struct cpe_info *t_info,
	void *reg_handle)
{
	struct cpe_notif_node *n = (struct cpe_notif_node *)reg_handle;

	if (!t_info || !reg_handle) {
		pr_err("%s: invalid handle\n", __func__);
		return CPE_SVC_INVALID_HANDLE;
	}

	list_del(&(n->list));
	kfree(reg_handle);
	return CPE_SVC_SUCCESS;
}
  516. static enum cpe_svc_result cpe_svc_tgt_init(struct cpe_svc_codec_info_v1 *i,
  517. struct cpe_svc_tgt_abstraction *abs)
  518. {
  519. if (!i || !abs) {
  520. pr_err("%s: Incorrect information provided\n",
  521. __func__);
  522. return CPE_SVC_FAILED;
  523. }
  524. switch (i->id) {
  525. case CPE_SVC_CODEC_TOMTOM:
  526. return cpe_tgt_tomtom_init(i, abs);
  527. case CPE_SVC_CODEC_WCD9335:
  528. return cpe_tgt_wcd9335_init(i, abs);
  529. default:
  530. pr_err("%s: Codec type %d not supported\n",
  531. __func__, i->id);
  532. return CPE_SVC_FAILED;
  533. }
  534. return CPE_SVC_SUCCESS;
  535. }
  536. static void cpe_notify_cmi_client(struct cpe_info *t_info, u8 *payload,
  537. enum cpe_svc_result result)
  538. {
  539. struct cpe_notif_node *n = NULL;
  540. struct cmi_api_notification notif;
  541. struct cmi_hdr *hdr;
  542. u8 service = 0;
  543. if (!t_info || !payload) {
  544. pr_err("%s: invalid payload/handle\n",
  545. __func__);
  546. return;
  547. }
  548. hdr = CMI_GET_HEADER(payload);
  549. service = CMI_HDR_GET_SERVICE(hdr);
  550. notif.event = CPE_SVC_CMI_MSG;
  551. notif.result = result;
  552. notif.message = payload;
  553. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  554. list_for_each_entry(n, &t_info->client_list, list) {
  555. if ((n->mask & CPE_SVC_CMI_MSG) &&
  556. n->service == service &&
  557. n->notif.cmi_notification) {
  558. n->notif.cmi_notification(&notif);
  559. break;
  560. }
  561. }
  562. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  563. }
  564. static void cpe_toggle_irq_notification(struct cpe_info *t_info, u32 value)
  565. {
  566. if (cpe_d.cpe_irq_control_callback)
  567. cpe_d.cpe_irq_control_callback(value);
  568. }
  569. static void cpe_command_cleanup(struct cpe_command_node *command_node)
  570. {
  571. switch (command_node->command) {
  572. case CPE_CMD_SEND_MSG:
  573. case CPE_CMD_SEND_TRANS_MSG:
  574. case CPE_CMD_SEND_MSG_COMPLETE:
  575. case CPE_CMD_SHUTDOWN:
  576. case CPE_CMD_READ:
  577. kfree(command_node->data);
  578. command_node->data = NULL;
  579. break;
  580. default:
  581. pr_err("%s: unhandled command\n",
  582. __func__);
  583. break;
  584. }
  585. }
/*
 * Build a CMI message in the target inbox staging buffer and write it
 * to the CPE mailbox.
 *
 * For the named opcodes a message is synthesized locally (@msg may be
 * NULL). For any other opcode the caller-supplied @msg is used: if
 * msg->address is non-zero the payload (and any out-of-band buffer)
 * is first written into CPE RAM and only a small CMI_MSG_TRANSPORT
 * descriptor goes through the inbox; otherwise the payload is copied
 * in-band into the inbox directly.
 */
static enum cpe_svc_result cpe_send_msg_to_inbox(
		struct cpe_info *t_info, u32 opcode,
		struct cpe_send_msg *msg)
{
	size_t bytes = 0;
	size_t inbox_size =
		t_info->tgt->tgt_get_cpe_info()->inbox_size;
	struct cmi_hdr *hdr;
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	memset(t_info->tgt->inbox, 0, inbox_size);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	CMI_HDR_SET_SESSION(hdr, 1);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
	CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	switch (opcode) {
	case CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC: {
		/* ask the CPE to allocate a shared message buffer */
		struct cmi_core_svc_cmd_shared_mem_alloc *m;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_core_svc_cmd_shared_mem_alloc));
		m = (struct cmi_core_svc_cmd_shared_mem_alloc *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		m->size = CPE_MSG_BUFFER_SIZE;
		pr_debug("send shared mem alloc msg to cpe inbox\n");
		}
		break;
	case CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ:
		/* zero-payload request for DRAM access */
		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr, 0);
		pr_debug("%s: Creating DRAM acces request msg\n",
			__func__);
		break;
	case CPE_CMI_BASIC_RSP_OPCODE: {
		/* generic acknowledgment with status 0 */
		struct cmi_basic_rsp_result *rsp;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CMI_BASIC_RSP_OPCODE);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_basic_rsp_result));
		rsp = (struct cmi_basic_rsp_result *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		rsp->status = 0;
		pr_debug("%s: send basic response\n", __func__);
		}
		break;
	default:
		if (msg->address != 0) {
			struct cmi_msg_transport *m = NULL;
			struct cpe_svc_mem_segment mem_seg;

			mem_seg.type = CPE_SVC_DATA_MEM;
			if (msg->isobm) {
				/* stage the out-of-band buffer into CPE RAM first */
				struct cmi_obm *obm = (struct cmi_obm *)
					CMI_GET_PAYLOAD(msg->payload);
				mem_seg.cpe_addr = obm->mem_handle;
				mem_seg.data = (u8 *)obm->data_ptr.kvaddr;
				mem_seg.size = obm->size;
				t_info->tgt->tgt_write_ram(t_info, &mem_seg);
			}
			/* then the message itself, announced via a transport descriptor */
			mem_seg.cpe_addr = msg->address;
			mem_seg.data = msg->payload;
			mem_seg.size = msg->size;
			t_info->tgt->tgt_write_ram(t_info, &mem_seg);

			hdr = CMI_GET_HEADER(t_info->tgt->inbox);
			CMI_HDR_SET_OPCODE(hdr, CMI_MSG_TRANSPORT);
			m = (struct cmi_msg_transport *)
				CMI_GET_PAYLOAD(t_info->tgt->inbox);
			m->addr = msg->address;
			m->size = msg->size;
			CMI_HDR_SET_PAYLOAD_SIZE(hdr,
				sizeof(struct cmi_msg_transport));
		} else {
			/* in-band: the caller's message replaces the whole inbox image */
			memcpy(t_info->tgt->inbox, msg->payload,
			       msg->size);
		}
		break;
	}

	pr_debug("%s: sending message to cpe inbox\n",
		__func__);
	bytes = sizeof(struct cmi_hdr);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	bytes += CMI_HDR_GET_PAYLOAD_SIZE(hdr);
	rc = t_info->tgt->tgt_write_mailbox(t_info->tgt->inbox, bytes);

	return rc;
}
  672. static bool cpe_is_cmd_clk_req(void *cmd)
  673. {
  674. struct cmi_hdr *hdr;
  675. hdr = CMI_GET_HEADER(cmd);
  676. if ((CMI_HDR_GET_SERVICE(hdr) ==
  677. CMI_CPE_CORE_SERVICE_ID)) {
  678. if (CMI_GET_OPCODE(cmd) ==
  679. CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST)
  680. return true;
  681. }
  682. return false;
  683. }
  684. static enum cpe_svc_result cpe_process_clk_change_req(
  685. struct cpe_info *t_info)
  686. {
  687. struct cmi_core_svc_cmd_clk_freq_request *req;
  688. req = (struct cmi_core_svc_cmd_clk_freq_request *)
  689. CMI_GET_PAYLOAD(t_info->tgt->outbox);
  690. if (!cpe_d.cpe_change_freq_plan_cb) {
  691. pr_err("%s: No support for clk freq change\n",
  692. __func__);
  693. return CPE_SVC_FAILED;
  694. }
  695. cpe_d.cpe_change_freq_plan_cb(cpe_d.cdc_priv,
  696. req->clk_freq);
  697. /*send a basic response*/
  698. cpe_send_msg_to_inbox(t_info,
  699. CPE_CMI_BASIC_RSP_OPCODE, NULL);
  700. return CPE_SVC_SUCCESS;
  701. }
/*
 * cpe_process_irq_int - handle a CPE interrupt and advance the
 * service state machine.
 * @irq: which CPE interrupt fired (outbox, watchdog, memory error, ...)
 * @t_info: service instance; must be non-NULL.
 *
 * Error interrupts (memory access error, watchdog, FLL lock lost and
 * unknown IRQs) only log and transition/shut down the service; no
 * state-machine processing happens for them.  For CPE_IRQ_OUTBOX_IRQ
 * the outbox is read first, then dispatched according to the current
 * state/substate.  Runs with cpe_api_mutex held for the whole body.
 */
static void cpe_process_irq_int(u32 irq,
		struct cpe_info *t_info)
{
	struct cpe_command_node temp_node;
	struct cpe_send_msg *m;
	u8 size = 0;
	bool err_irq = false;
	struct cmi_hdr *hdr;

	pr_debug("%s: irq = %u\n", __func__, irq);
	if (!t_info) {
		pr_err("%s: Invalid handle\n",
			__func__);
		return;
	}
	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	switch (irq) {
	case CPE_IRQ_OUTBOX_IRQ:
		/* CPE posted a message: pull it into the outbox buffer */
		size = t_info->tgt->tgt_get_cpe_info()->outbox_size;
		t_info->tgt->tgt_read_mailbox(t_info->tgt->outbox, size);
		break;
	case CPE_IRQ_MEM_ACCESS_ERROR:
		err_irq = true;
		cpe_change_state(t_info, CPE_STATE_OFFLINE, CPE_SS_IDLE);
		break;
	case CPE_IRQ_WDOG_BITE:
	case CPE_IRQ_RCO_WDOG_INT:
		/* watchdog fired: take the whole service down */
		err_irq = true;
		__cpe_svc_shutdown(t_info);
		break;
	case CPE_IRQ_FLL_LOCK_LOST:
	default:
		err_irq = true;
		break;
	}
	if (err_irq) {
		pr_err("%s: CPE error IRQ %u occurred\n",
			__func__, irq);
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		return;
	}
	switch (t_info->state) {
	case CPE_STATE_BOOTING:
		switch (t_info->substate) {
		case CPE_SS_BOOT:
			/* first boot IRQ: run boot initialization */
			temp_node.command = CPE_CMD_BOOT_INITIALIZE;
			temp_node.result = CPE_SVC_SUCCESS;
			t_info->substate = CPE_SS_BOOT_INIT;
			t_info->cpe_process_command(&temp_node);
			break;
		case CPE_SS_BOOT_INIT:
			/* second boot IRQ: finish boot, go online */
			temp_node.command = CPE_CMD_BOOT_COMPLETE;
			temp_node.result = CPE_SVC_SUCCESS;
			t_info->substate = CPE_SS_ONLINE;
			t_info->cpe_process_command(&temp_node);
			break;
		default:
			/* NOTE(review): format args are (state, substate)
			 * but the text reads "substate ... state" -
			 * values appear swapped; confirm before relying
			 * on this log. */
			pr_debug("%s: unhandled substate %d for state %d\n",
				__func__, t_info->state, t_info->substate);
			break;
		}
		break;
	case CPE_STATE_SENDING_MSG:
		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
		/*
		 * A detection-status event may arrive while a send is in
		 * flight; treat it as an incoming message instead of a
		 * send-completion.
		 */
		if (CMI_GET_OPCODE(t_info->tgt->outbox) ==
			CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
			pr_debug("%s: session_id: %u, state: %d,%d, event received\n",
				__func__, CMI_HDR_GET_SESSION_ID(hdr),
				t_info->state, t_info->substate);
			temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
			temp_node.data = NULL;
			t_info->cpe_process_command(&temp_node);
			break;
		}
		m = (struct cpe_send_msg *)t_info->pending;
		switch (t_info->substate) {
		case CPE_SS_MSG_REQUEST_ACCESS:
			/* DRAM access granted: send the queued transport msg */
			cpe_send_cmd_to_thread(t_info,
				CPE_CMD_SEND_TRANS_MSG, m, true);
			break;
		case CPE_SS_MSG_SEND_INBOX:
			/* inbox message acknowledged, unless this is a
			 * clock request that must be serviced inline */
			if (cpe_is_cmd_clk_req(t_info->tgt->outbox))
				cpe_process_clk_change_req(t_info);
			else
				cpe_send_cmd_to_thread(t_info,
					CPE_CMD_SEND_MSG_COMPLETE, m, true);
			break;
		default:
			pr_debug("%s: unhandled substate %d for state %d\n",
				__func__, t_info->state, t_info->substate);
			break;
		}
		break;
	case CPE_STATE_IDLE:
		/* unsolicited message from CPE: forward to client */
		pr_debug("%s: Message received, notifying client\n",
			__func__);
		temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
		temp_node.data = NULL;
		t_info->cpe_process_command(&temp_node);
		break;
	default:
		pr_debug("%s: unhandled state %d\n",
			__func__, t_info->state);
		break;
	}
	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
}
  808. static void broacast_boot_failed(void)
  809. {
  810. struct cpe_info *t_info = cpe_d.cpe_default_handle;
  811. struct cpe_svc_notification payload;
  812. payload.event = CPE_SVC_BOOT_FAILED;
  813. payload.result = CPE_SVC_FAILED;
  814. payload.payload = NULL;
  815. if (t_info)
  816. payload.private_data =
  817. t_info->client_context;
  818. cpe_broadcast_notification(t_info, &payload);
  819. }
  820. static enum cpe_svc_result broadcast_boot_event(
  821. struct cpe_info *t_info)
  822. {
  823. struct cpe_svc_notification payload;
  824. payload.event = CPE_SVC_ONLINE;
  825. payload.result = CPE_SVC_SUCCESS;
  826. payload.payload = NULL;
  827. if (t_info)
  828. payload.private_data =
  829. t_info->client_context;
  830. cpe_broadcast_notification(t_info, &payload);
  831. return CPE_SVC_SUCCESS;
  832. }
  833. static enum cpe_process_result cpe_boot_initialize(struct cpe_info *t_info,
  834. enum cpe_svc_result *cpe_rc)
  835. {
  836. enum cpe_process_result rc = CPE_SVC_FAILED;
  837. struct cpe_svc_notification payload;
  838. struct cmi_core_svc_event_system_boot *p = NULL;
  839. if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
  840. CPE_CORE_SVC_EVENT_SYSTEM_BOOT) {
  841. broacast_boot_failed();
  842. return rc;
  843. }
  844. p = (struct cmi_core_svc_event_system_boot *)
  845. CMI_GET_PAYLOAD(t_info->tgt->outbox);
  846. if (p->status != CPE_BOOT_SUCCESS) {
  847. pr_err("%s: cpe boot failed, status = %d\n",
  848. __func__, p->status);
  849. broacast_boot_failed();
  850. return rc;
  851. }
  852. /* boot was successful */
  853. if (p->version ==
  854. CPE_CORE_VERSION_SYSTEM_BOOT_EVENT) {
  855. cpe_d.cpe_debug_vector.debug_address =
  856. p->sfr_buff_address;
  857. cpe_d.cpe_debug_vector.debug_buffer_size =
  858. p->sfr_buff_size;
  859. cpe_d.cpe_debug_vector.status = p->status;
  860. payload.event = CPE_SVC_BOOT;
  861. payload.result = CPE_SVC_SUCCESS;
  862. payload.payload = (void *)&cpe_d.cpe_debug_vector;
  863. payload.private_data = t_info->client_context;
  864. cpe_broadcast_notification(t_info, &payload);
  865. }
  866. cpe_change_state(t_info, CPE_STATE_BOOTING,
  867. CPE_SS_BOOT_INIT);
  868. (*cpe_rc) = cpe_send_msg_to_inbox(t_info,
  869. CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC, NULL);
  870. rc = CPE_PROC_SUCCESS;
  871. return rc;
  872. }
  873. static void cpe_svc_core_cmi_handler(
  874. const struct cmi_api_notification *parameter)
  875. {
  876. struct cmi_hdr *hdr;
  877. if (!parameter)
  878. return;
  879. pr_debug("%s: event = %d\n",
  880. __func__, parameter->event);
  881. if (parameter->event != CMI_API_MSG)
  882. return;
  883. hdr = (struct cmi_hdr *) parameter->message;
  884. if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
  885. struct cmi_basic_rsp_result *result;
  886. result = (struct cmi_basic_rsp_result *)
  887. ((u8 *)parameter->message) + (sizeof(*hdr));
  888. if (result->status)
  889. pr_err("%s: error response, error code = %u\n",
  890. __func__, result->status);
  891. complete(&cpe_d.cpe_default_handle->core_svc_cmd_compl);
  892. }
  893. }
  894. static void cpe_clk_plan_work(struct work_struct *work)
  895. {
  896. struct cpe_info *t_info = NULL;
  897. size_t size = 0;
  898. struct cpe_svc_cfg_clk_plan plan;
  899. u8 *cmi_msg;
  900. struct cmi_hdr *hdr;
  901. int rc;
  902. t_info = container_of(work, struct cpe_info, clk_plan_work);
  903. if (!t_info) {
  904. pr_err("%s: Invalid handle for cpe_info\n",
  905. __func__);
  906. return;
  907. }
  908. /* Register the core service */
  909. cpe_d.cpe_cmi_handle = cmi_register(
  910. cpe_svc_core_cmi_handler,
  911. CMI_CPE_CORE_SERVICE_ID);
  912. /* send the clk plan command */
  913. if (!cpe_d.cpe_query_freq_plans_cb) {
  914. pr_err("%s: No support for querying clk plans\n",
  915. __func__);
  916. return;
  917. }
  918. cpe_d.cpe_query_freq_plans_cb(cpe_d.cdc_priv, &plan);
  919. size = sizeof(plan.current_clk_feq) +
  920. sizeof(plan.num_clk_freqs);
  921. size += plan.num_clk_freqs *
  922. sizeof(plan.clk_freqs[0]);
  923. cmi_msg = kzalloc(size + sizeof(struct cmi_hdr),
  924. GFP_KERNEL);
  925. if (!cmi_msg)
  926. return;
  927. hdr = (struct cmi_hdr *) cmi_msg;
  928. CMI_HDR_SET_OPCODE(hdr,
  929. CPE_CORE_SVC_CMD_CFG_CLK_PLAN);
  930. CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
  931. CMI_HDR_SET_SESSION(hdr, 1);
  932. CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
  933. CMI_HDR_SET_PAYLOAD_SIZE(hdr, size);
  934. memcpy(CMI_GET_PAYLOAD(cmi_msg), &plan,
  935. size);
  936. cmi_send_msg(cmi_msg);
  937. /* Wait for clk plan command to complete */
  938. rc = wait_for_completion_timeout(&t_info->core_svc_cmd_compl,
  939. (10 * HZ));
  940. if (!rc) {
  941. pr_err("%s: clk plan cmd timed out\n",
  942. __func__);
  943. goto cmd_fail;
  944. }
  945. /* clk plan cmd is successful, send start notification */
  946. if (t_info->cpe_start_notification)
  947. t_info->cpe_start_notification(t_info);
  948. else
  949. pr_err("%s: no start notification\n",
  950. __func__);
  951. cmd_fail:
  952. kfree(cmi_msg);
  953. cmi_deregister(cpe_d.cpe_cmi_handle);
  954. }
  955. static enum cpe_process_result cpe_boot_complete(
  956. struct cpe_info *t_info)
  957. {
  958. struct cmi_core_svc_cmdrsp_shared_mem_alloc *p = NULL;
  959. if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
  960. CPE_CORE_SVC_CMDRSP_SHARED_MEM_ALLOC) {
  961. broacast_boot_failed();
  962. return CPE_PROC_FAILED;
  963. }
  964. p = (struct cmi_core_svc_cmdrsp_shared_mem_alloc *)
  965. CMI_GET_PAYLOAD(t_info->tgt->outbox);
  966. cpe_d.cpe_msg_buffer = p->addr;
  967. if (cpe_d.cpe_msg_buffer == 0) {
  968. pr_err("%s: Invalid cpe buffer for message\n",
  969. __func__);
  970. broacast_boot_failed();
  971. return CPE_PROC_FAILED;
  972. }
  973. cpe_change_state(t_info, CPE_STATE_IDLE, CPE_SS_IDLE);
  974. cpe_create_worker_thread(t_info);
  975. if (t_info->codec_id != CPE_SVC_CODEC_TOMTOM) {
  976. schedule_work(&t_info->clk_plan_work);
  977. } else {
  978. if (t_info->cpe_start_notification)
  979. t_info->cpe_start_notification(t_info);
  980. else
  981. pr_err("%s: no start notification\n",
  982. __func__);
  983. }
  984. pr_debug("%s: boot complete\n", __func__);
  985. return CPE_SVC_SUCCESS;
  986. }
/*
 * cpe_process_send_msg - stage an outgoing CMI message for delivery.
 * @t_info: service instance.
 * @cpe_rc: out-param receiving the result of the inbox send.
 * @command_node: command whose ->data is the struct cpe_send_msg.
 *
 * Small messages (<= inbox size) are sent directly through the inbox;
 * larger ones (< CPE_MSG_BUFFER_SIZE) first request DRAM access so
 * they can be written to the shared CPE message buffer.  Oversized
 * messages are rejected and the node is cleaned up.
 *
 * Returns CPE_PROC_QUEUED if a message is already in flight,
 * CPE_PROC_FAILED for an oversized message, CPE_PROC_SUCCESS
 * otherwise.
 */
static enum cpe_process_result cpe_process_send_msg(
	struct cpe_info *t_info,
	enum cpe_svc_result *cpe_rc,
	struct cpe_command_node *command_node)
{
	enum cpe_process_result rc = CPE_PROC_SUCCESS;
	struct cpe_send_msg *m =
		(struct cpe_send_msg *)command_node->data;
	u32 size = m->size;

	/* only one message may be in flight at a time */
	if (t_info->pending) {
		pr_debug("%s: message queued\n", __func__);
		*cpe_rc = CPE_SVC_SUCCESS;
		return CPE_PROC_QUEUED;
	}

	pr_debug("%s: Send CMI message, size = %u\n",
		 __func__, size);

	if (size <= t_info->tgt->tgt_get_cpe_info()->inbox_size) {
		/* fits the inbox mailbox: send directly */
		pr_debug("%s: Msg fits mailbox, size %u\n",
			 __func__, size);
		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				 CPE_SS_MSG_SEND_INBOX);
		t_info->pending = m;
		*cpe_rc = cpe_send_msg_to_inbox(t_info, 0, m);
	} else if (size < CPE_MSG_BUFFER_SIZE) {
		/* too big for the mailbox: route via shared DRAM buffer */
		m->address = cpe_d.cpe_msg_buffer;
		pr_debug("%s: Message req CMI mem access\n",
			 __func__);
		t_info->pending = m;
		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				 CPE_SS_MSG_REQUEST_ACCESS);
		*cpe_rc = cpe_send_msg_to_inbox(t_info,
				CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ, m);
	} else {
		/* larger than the shared buffer: reject */
		pr_debug("%s: Invalid msg size %u\n",
			 __func__, size);
		cpe_command_cleanup(command_node);
		rc = CPE_PROC_FAILED;
		cpe_change_state(t_info, CPE_STATE_IDLE,
				 CPE_SS_IDLE);
	}

	return rc;
}
  1029. static enum cpe_process_result cpe_process_incoming(
  1030. struct cpe_info *t_info)
  1031. {
  1032. enum cpe_process_result rc = CPE_PROC_FAILED;
  1033. struct cmi_hdr *hdr;
  1034. hdr = CMI_GET_HEADER(t_info->tgt->outbox);
  1035. if (CMI_HDR_GET_SERVICE(hdr) ==
  1036. CMI_CPE_CORE_SERVICE_ID) {
  1037. pr_debug("%s: core service message received\n",
  1038. __func__);
  1039. switch (CMI_GET_OPCODE(t_info->tgt->outbox)) {
  1040. case CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST:
  1041. cpe_process_clk_change_req(t_info);
  1042. rc = CPE_PROC_SUCCESS;
  1043. break;
  1044. case CMI_MSG_TRANSPORT:
  1045. pr_debug("%s: transport msg received\n",
  1046. __func__);
  1047. rc = CPE_PROC_SUCCESS;
  1048. break;
  1049. case CPE_CMI_BASIC_RSP_OPCODE:
  1050. pr_debug("%s: received basic rsp\n",
  1051. __func__);
  1052. rc = CPE_PROC_SUCCESS;
  1053. break;
  1054. default:
  1055. pr_debug("%s: unknown message received\n",
  1056. __func__);
  1057. break;
  1058. }
  1059. } else {
  1060. /* if service id if for a CMI client, notify client */
  1061. pr_debug("%s: Message received, notifying client\n",
  1062. __func__);
  1063. cpe_notify_cmi_client(t_info,
  1064. t_info->tgt->outbox, CPE_SVC_SUCCESS);
  1065. rc = CPE_PROC_SUCCESS;
  1066. }
  1067. return rc;
  1068. }
  1069. static enum cpe_process_result cpe_process_kill_thread(
  1070. struct cpe_info *t_info,
  1071. struct cpe_command_node *command_node)
  1072. {
  1073. struct cpe_svc_notification payload;
  1074. cpe_d.cpe_msg_buffer = 0;
  1075. payload.result = CPE_SVC_SHUTTING_DOWN;
  1076. payload.event = CPE_SVC_OFFLINE;
  1077. payload.payload = NULL;
  1078. payload.private_data = t_info->client_context;
  1079. /*
  1080. * Make state as offline before broadcasting
  1081. * the message to clients.
  1082. */
  1083. cpe_change_state(t_info, CPE_STATE_OFFLINE,
  1084. CPE_SS_IDLE);
  1085. cpe_broadcast_notification(t_info, &payload);
  1086. return CPE_PROC_KILLED;
  1087. }
/*
 * cpe_mt_process_cmd - central dispatcher for CPE service commands.
 * @command_node: the command to execute; ->data carries the payload
 *                for message-send commands.
 *
 * Validates the command against the current state machine, dispatches
 * it, and on any CPE_SVC failure notifies the client of the pending
 * message (if any), cleans up and drops back to IDLE.
 *
 * Returns the enum cpe_process_result of the executed command, or
 * CPE_PROC_FAILED on invalid input / command failure.
 */
static enum cpe_process_result cpe_mt_process_cmd(
	struct cpe_command_node *command_node)
{
	struct cpe_info *t_info = cpe_d.cpe_default_handle;
	enum cpe_svc_result cpe_rc = CPE_SVC_SUCCESS;
	enum cpe_process_result rc = CPE_PROC_SUCCESS;
	struct cpe_send_msg *m;
	struct cmi_hdr *hdr;
	u8 service = 0;
	u8 retries = 0;

	if (!t_info || !command_node) {
		pr_err("%s: Invalid handle/command node\n",
			__func__);
		return CPE_PROC_FAILED;
	}

	pr_debug("%s: cmd = %u\n", __func__, command_node->command);

	/* reject commands not allowed in the current state */
	cpe_rc = cpe_is_command_valid(t_info, command_node->command);
	if (cpe_rc != CPE_SVC_SUCCESS) {
		pr_err("%s: Invalid command %d, err = %d\n",
			__func__, command_node->command, cpe_rc);
		return CPE_PROC_FAILED;
	}

	switch (command_node->command) {
	case CPE_CMD_BOOT_INITIALIZE:
		rc = cpe_boot_initialize(t_info, &cpe_rc);
		break;
	case CPE_CMD_BOOT_COMPLETE:
		rc = cpe_boot_complete(t_info);
		break;
	case CPE_CMD_SEND_MSG:
		rc = cpe_process_send_msg(t_info, &cpe_rc,
					  command_node);
		break;
	case CPE_CMD_SEND_TRANS_MSG:
		/* wait (bounded) for CPE to go inactive before sending */
		m = (struct cpe_send_msg *)command_node->data;
		while (retries < CPE_SVC_INACTIVE_STATE_RETRIES_MAX) {
			if (t_info->tgt->tgt_is_active()) {
				++retries;
				/* Wait for CPE to be inactive */
				usleep_range(5000, 5100);
			} else {
				break;
			}
		}
		pr_debug("%s: cpe inactive after %d attempts\n",
			 __func__, retries);
		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				 CPE_SS_MSG_SEND_INBOX);
		rc = cpe_send_msg_to_inbox(t_info, 0, m);
		break;
	case CPE_CMD_SEND_MSG_COMPLETE:
		/* NOTE(review): 'service' is read here but never used
		 * afterwards */
		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
		service = CMI_HDR_GET_SERVICE(hdr);
		pr_debug("%s: msg send success, notifying clients\n",
			 __func__);
		cpe_command_cleanup(command_node);
		t_info->pending = NULL;
		cpe_change_state(t_info,
				 CPE_STATE_IDLE, CPE_SS_IDLE);
		cpe_notify_cmi_client(t_info,
			t_info->tgt->outbox, CPE_SVC_SUCCESS);
		break;
	case CPE_CMD_PROC_INCOMING_MSG:
		rc = cpe_process_incoming(t_info);
		break;
	case CPE_CMD_KILL_THREAD:
		rc = cpe_process_kill_thread(t_info, command_node);
		break;
	default:
		pr_err("%s: unhandled cpe cmd = %d\n",
			__func__, command_node->command);
		break;
	}

	if (cpe_rc != CPE_SVC_SUCCESS) {
		/* command failed: fail the pending message, reset to IDLE */
		pr_err("%s: failed to execute command\n", __func__);
		if (t_info->pending) {
			m = (struct cpe_send_msg *)t_info->pending;
			cpe_notify_cmi_client(t_info, m->payload,
					      CPE_SVC_FAILED);
			t_info->pending = NULL;
		}
		cpe_command_cleanup(command_node);
		rc = CPE_PROC_FAILED;
		cpe_change_state(t_info, CPE_STATE_IDLE,
				 CPE_SS_IDLE);
	}

	return rc;
}
/*
 * cpe_mt_validate_cmd - check whether a command is legal in the
 * current service state.
 * @t_info: service instance (may be NULL / uninitialized).
 * @command: the command to validate.
 *
 * Implements the state x command permission table.  Returns
 * CPE_SVC_SUCCESS when allowed, CPE_SVC_BUSY when FTM testing must
 * wait, CPE_SVC_NOT_READY / CPE_SVC_FAILED otherwise.
 */
static enum cpe_svc_result cpe_mt_validate_cmd(
	const struct cpe_info *t_info,
	enum cpe_command command)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	if ((t_info == NULL) || t_info->initialized == false) {
		pr_err("%s: cpe service is not ready\n",
			__func__);
		return CPE_SVC_NOT_READY;
	}

	switch (t_info->state) {
	case CPE_STATE_UNINITIALIZED:
	case CPE_STATE_INITIALIZED:
		/* no firmware loaded yet: setup/teardown commands only */
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_DL_SEGMENT:
		case CPE_CMD_RAMDUMP:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_DEINITIALIZE:
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;
	case CPE_STATE_DOWNLOADING:
		/* firmware transfer in progress */
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_DL_SEGMENT:
		case CPE_CMD_BOOT:
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;
	case CPE_STATE_BOOTING:
		/* boot handshake in progress; FTM must wait */
		switch (command) {
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_BOOT_INITIALIZE:
		case CPE_CMD_BOOT_COMPLETE:
		case CPE_CMD_SHUTDOWN:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;
	case CPE_STATE_IDLE:
		/* fully online and idle: messaging and control allowed */
		switch (command) {
		case CPE_CMD_SEND_MSG:
		case CPE_CMD_SEND_TRANS_MSG:
		case CPE_CMD_SEND_MSG_COMPLETE:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_RESET:
		case CPE_CMD_SHUTDOWN:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_PROC_INCOMING_MSG:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_FAILED;
			break;
		}
		break;
	case CPE_STATE_SENDING_MSG:
		/* a message is in flight; no RESET until it completes */
		switch (command) {
		case CPE_CMD_SEND_MSG:
		case CPE_CMD_SEND_TRANS_MSG:
		case CPE_CMD_SEND_MSG_COMPLETE:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_SHUTDOWN:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_PROC_INCOMING_MSG:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_FAILED;
			break;
		}
		break;
	case CPE_STATE_OFFLINE:
		/* only recovery-type commands are permitted */
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_RAMDUMP:
		case CPE_CMD_KILL_THREAD:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;
	default:
		pr_debug("%s: unhandled state %d\n",
			__func__, t_info->state);
		break;
	}

	if (rc != CPE_SVC_SUCCESS)
		pr_err("%s: invalid command %d, state = %d\n",
			__func__, command, t_info->state);
	return rc;
}
  1294. void *cpe_svc_initialize(
  1295. void irq_control_callback(u32 enable),
  1296. const void *codec_info, void *context)
  1297. {
  1298. struct cpe_info *t_info = NULL;
  1299. const struct cpe_svc_hw_cfg *cap = NULL;
  1300. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1301. struct cpe_svc_init_param *init_context =
  1302. (struct cpe_svc_init_param *) context;
  1303. void *client_context = NULL;
  1304. if (cpe_d.cpe_default_handle &&
  1305. cpe_d.cpe_default_handle->initialized == true)
  1306. return (void *)cpe_d.cpe_default_handle;
  1307. cpe_d.cpe_query_freq_plans_cb = NULL;
  1308. cpe_d.cpe_change_freq_plan_cb = NULL;
  1309. if (context) {
  1310. client_context = init_context->context;
  1311. switch (init_context->version) {
  1312. case CPE_SVC_INIT_PARAM_V1:
  1313. cpe_d.cpe_query_freq_plans_cb =
  1314. init_context->query_freq_plans_cb;
  1315. cpe_d.cpe_change_freq_plan_cb =
  1316. init_context->change_freq_plan_cb;
  1317. break;
  1318. default:
  1319. break;
  1320. }
  1321. }
  1322. if (!cpe_d.cpe_default_handle) {
  1323. cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
  1324. GFP_KERNEL);
  1325. if (!cpe_d.cpe_default_handle)
  1326. goto err_register;
  1327. memset(cpe_d.cpe_default_handle, 0,
  1328. sizeof(struct cpe_info));
  1329. }
  1330. t_info = cpe_d.cpe_default_handle;
  1331. t_info->client_context = client_context;
  1332. INIT_LIST_HEAD(&t_info->client_list);
  1333. cpe_d.cdc_priv = client_context;
  1334. INIT_WORK(&t_info->clk_plan_work, cpe_clk_plan_work);
  1335. init_completion(&t_info->core_svc_cmd_compl);
  1336. t_info->tgt = kzalloc(sizeof(struct cpe_svc_tgt_abstraction),
  1337. GFP_KERNEL);
  1338. if (!t_info->tgt)
  1339. goto err_tgt_alloc;
  1340. t_info->codec_id =
  1341. ((struct cpe_svc_codec_info_v1 *) codec_info)->id;
  1342. rc = cpe_svc_tgt_init((struct cpe_svc_codec_info_v1 *)codec_info,
  1343. t_info->tgt);
  1344. if (rc != CPE_SVC_SUCCESS)
  1345. goto err_tgt_init;
  1346. cap = t_info->tgt->tgt_get_cpe_info();
  1347. memset(t_info->tgt->outbox, 0, cap->outbox_size);
  1348. memset(t_info->tgt->inbox, 0, cap->inbox_size);
  1349. mutex_init(&t_info->msg_lock);
  1350. cpe_d.cpe_irq_control_callback = irq_control_callback;
  1351. t_info->cpe_process_command = cpe_mt_process_cmd;
  1352. t_info->cpe_cmd_validate = cpe_mt_validate_cmd;
  1353. t_info->cpe_start_notification = broadcast_boot_event;
  1354. mutex_init(&cpe_d.cpe_api_mutex);
  1355. mutex_init(&cpe_d.cpe_svc_lock);
  1356. pr_debug("%s: cpe services initialized\n", __func__);
  1357. t_info->state = CPE_STATE_INITIALIZED;
  1358. t_info->initialized = true;
  1359. return t_info;
  1360. err_tgt_init:
  1361. kfree(t_info->tgt);
  1362. err_tgt_alloc:
  1363. kfree(cpe_d.cpe_default_handle);
  1364. cpe_d.cpe_default_handle = NULL;
  1365. err_register:
  1366. return NULL;
  1367. }
  1368. enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle)
  1369. {
  1370. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1371. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1372. if (!t_info)
  1373. t_info = cpe_d.cpe_default_handle;
  1374. rc = cpe_is_command_valid(t_info, CPE_CMD_DEINITIALIZE);
  1375. if (rc != CPE_SVC_SUCCESS) {
  1376. pr_err("%s: Invalid command %d\n",
  1377. __func__, CPE_CMD_DEINITIALIZE);
  1378. return rc;
  1379. }
  1380. if (cpe_d.cpe_default_handle == t_info)
  1381. cpe_d.cpe_default_handle = NULL;
  1382. t_info->tgt->tgt_deinit(t_info->tgt);
  1383. cpe_change_state(t_info, CPE_STATE_UNINITIALIZED,
  1384. CPE_SS_IDLE);
  1385. mutex_destroy(&t_info->msg_lock);
  1386. kfree(t_info->tgt);
  1387. kfree(t_info);
  1388. mutex_destroy(&cpe_d.cpe_api_mutex);
  1389. mutex_destroy(&cpe_d.cpe_svc_lock);
  1390. return rc;
  1391. }
  1392. void *cpe_svc_register(void *cpe_handle,
  1393. void (*notification_callback)
  1394. (const struct cpe_svc_notification *parameter),
  1395. u32 mask, const char *name)
  1396. {
  1397. void *reg_handle;
  1398. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1399. if (!cpe_d.cpe_default_handle) {
  1400. cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
  1401. GFP_KERNEL);
  1402. if (!cpe_d.cpe_default_handle) {
  1403. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1404. return NULL;
  1405. }
  1406. memset(cpe_d.cpe_default_handle, 0,
  1407. sizeof(struct cpe_info));
  1408. }
  1409. if (!cpe_handle)
  1410. cpe_handle = cpe_d.cpe_default_handle;
  1411. reg_handle = cpe_register_generic((struct cpe_info *)cpe_handle,
  1412. notification_callback,
  1413. NULL,
  1414. mask, CPE_NO_SERVICE, name);
  1415. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1416. return reg_handle;
  1417. }
  1418. enum cpe_svc_result cpe_svc_deregister(void *cpe_handle, void *reg_handle)
  1419. {
  1420. enum cpe_svc_result rc;
  1421. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1422. if (!cpe_handle)
  1423. cpe_handle = cpe_d.cpe_default_handle;
  1424. rc = cpe_deregister_generic((struct cpe_info *)cpe_handle,
  1425. reg_handle);
  1426. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1427. return rc;
  1428. }
  1429. enum cpe_svc_result cpe_svc_download_segment(void *cpe_handle,
  1430. const struct cpe_svc_mem_segment *segment)
  1431. {
  1432. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1433. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1434. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1435. if (!t_info)
  1436. t_info = cpe_d.cpe_default_handle;
  1437. rc = cpe_is_command_valid(t_info, CPE_CMD_DL_SEGMENT);
  1438. if (rc != CPE_SVC_SUCCESS) {
  1439. pr_err("%s: cmd validation fail, cmd = %d\n",
  1440. __func__, CPE_CMD_DL_SEGMENT);
  1441. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1442. return rc;
  1443. }
  1444. cpe_toggle_irq_notification(t_info, false);
  1445. t_info->state = CPE_STATE_DOWNLOADING;
  1446. t_info->substate = CPE_SS_DL_DOWNLOADING;
  1447. rc = t_info->tgt->tgt_write_ram(t_info, segment);
  1448. cpe_toggle_irq_notification(t_info, true);
  1449. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1450. return rc;
  1451. }
  1452. enum cpe_svc_result cpe_svc_boot(void *cpe_handle, int debug_mode)
  1453. {
  1454. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1455. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1456. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1457. if (!t_info)
  1458. t_info = cpe_d.cpe_default_handle;
  1459. rc = cpe_is_command_valid(t_info, CPE_CMD_BOOT);
  1460. if (rc != CPE_SVC_SUCCESS) {
  1461. pr_err("%s: cmd validation fail, cmd = %d\n",
  1462. __func__, CPE_CMD_BOOT);
  1463. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1464. return rc;
  1465. }
  1466. if (rc == CPE_SVC_SUCCESS) {
  1467. t_info->tgt->tgt_boot(debug_mode);
  1468. t_info->state = CPE_STATE_BOOTING;
  1469. t_info->substate = CPE_SS_BOOT;
  1470. pr_debug("%s: cpe service booting\n",
  1471. __func__);
  1472. }
  1473. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1474. return rc;
  1475. }
  1476. enum cpe_svc_result cpe_svc_process_irq(void *cpe_handle, u32 cpe_irq)
  1477. {
  1478. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1479. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1480. if (!t_info)
  1481. t_info = cpe_d.cpe_default_handle;
  1482. cpe_toggle_irq_notification(t_info, false);
  1483. cpe_process_irq_int(cpe_irq, t_info);
  1484. cpe_toggle_irq_notification(t_info, true);
  1485. return rc;
  1486. }
  1487. enum cpe_svc_result cpe_svc_route_notification(void *cpe_handle,
  1488. enum cpe_svc_module module, enum cpe_svc_route_dest dest)
  1489. {
  1490. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1491. enum cpe_svc_result rc = CPE_SVC_NOT_READY;
  1492. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1493. if (!t_info)
  1494. t_info = cpe_d.cpe_default_handle;
  1495. if (t_info->tgt)
  1496. rc = t_info->tgt->tgt_route_notification(module, dest);
  1497. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1498. return rc;
  1499. }
/*
 * __cpe_svc_shutdown - shut down the CPE service (caller holds
 * cpe_api_mutex).
 * @cpe_handle: service handle; NULL selects the default handle.
 *
 * Drains the main command queue (failing queued SEND_MSG commands
 * back to their clients with CPE_SVC_SHUTTING_DOWN), fails the
 * in-flight pending message if any, moves the state to OFFLINE,
 * stops the worker thread and finally runs the KILL_THREAD command
 * to broadcast the offline notification.
 *
 * Returns the validation error if CPE_CMD_SHUTDOWN is not allowed in
 * the current state, CPE_SVC_SUCCESS otherwise.
 */
static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
	struct cpe_command_node *n = NULL;
	struct cpe_command_node kill_cmd;

	if (!t_info)
		t_info = cpe_d.cpe_default_handle;

	rc = cpe_is_command_valid(t_info, CPE_CMD_SHUTDOWN);
	if (rc != CPE_SVC_SUCCESS) {
		pr_err("%s: cmd validation fail, cmd = %d\n",
			__func__, CPE_CMD_SHUTDOWN);
		return rc;
	}

	/* drain queued commands; fail pending sends back to clients */
	while (!list_empty(&t_info->main_queue)) {
		n = list_first_entry(&t_info->main_queue,
				     struct cpe_command_node, list);

		if (n->command == CPE_CMD_SEND_MSG) {
			cpe_notify_cmi_client(t_info, (u8 *)n->data,
					      CPE_SVC_SHUTTING_DOWN);
		}
		/*
		 * Since command cannot be processed,
		 * delete it from the list and perform cleanup
		 */
		list_del(&n->list);
		cpe_command_cleanup(n);
		kfree(n);
	}

	pr_debug("%s: cpe service OFFLINE state\n", __func__);

	t_info->state = CPE_STATE_OFFLINE;
	t_info->substate = CPE_SS_IDLE;

	memset(&kill_cmd, 0, sizeof(kill_cmd));
	kill_cmd.command = CPE_CMD_KILL_THREAD;

	/* fail the in-flight message, if any, before killing the thread */
	if (t_info->pending) {
		struct cpe_send_msg *m =
			(struct cpe_send_msg *)t_info->pending;
		cpe_notify_cmi_client(t_info, m->payload,
				      CPE_SVC_SHUTTING_DOWN);
		kfree(t_info->pending);
		t_info->pending = NULL;
	}

	/* stop the worker thread, then broadcast offline via KILL_THREAD */
	cpe_cleanup_worker_thread(t_info);
	t_info->cpe_process_command(&kill_cmd);

	return rc;
}
  1546. enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
  1547. {
  1548. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1549. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1550. rc = __cpe_svc_shutdown(cpe_handle);
  1551. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1552. return rc;
  1553. }
  1554. enum cpe_svc_result cpe_svc_reset(void *cpe_handle)
  1555. {
  1556. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1557. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1558. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1559. if (!t_info)
  1560. t_info = cpe_d.cpe_default_handle;
  1561. rc = cpe_is_command_valid(t_info, CPE_CMD_RESET);
  1562. if (rc != CPE_SVC_SUCCESS) {
  1563. pr_err("%s: cmd validation fail, cmd = %d\n",
  1564. __func__, CPE_CMD_RESET);
  1565. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1566. return rc;
  1567. }
  1568. if (t_info && t_info->tgt) {
  1569. rc = t_info->tgt->tgt_reset();
  1570. pr_debug("%s: cpe services in INITIALIZED state\n",
  1571. __func__);
  1572. t_info->state = CPE_STATE_INITIALIZED;
  1573. t_info->substate = CPE_SS_IDLE;
  1574. }
  1575. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1576. return rc;
  1577. }
  1578. enum cpe_svc_result cpe_svc_ramdump(void *cpe_handle,
  1579. struct cpe_svc_mem_segment *buffer)
  1580. {
  1581. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1582. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1583. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1584. if (!t_info)
  1585. t_info = cpe_d.cpe_default_handle;
  1586. rc = cpe_is_command_valid(t_info, CPE_CMD_RAMDUMP);
  1587. if (rc != CPE_SVC_SUCCESS) {
  1588. pr_err("%s: cmd validation fail, cmd = %d\n",
  1589. __func__, CPE_CMD_RAMDUMP);
  1590. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1591. return rc;
  1592. }
  1593. if (t_info->tgt) {
  1594. rc = t_info->tgt->tgt_read_ram(t_info, buffer);
  1595. } else {
  1596. pr_err("%s: cpe service not ready\n", __func__);
  1597. rc = CPE_SVC_NOT_READY;
  1598. }
  1599. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1600. return rc;
  1601. }
  1602. enum cpe_svc_result cpe_svc_set_debug_mode(void *cpe_handle, u32 mode)
  1603. {
  1604. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1605. enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
  1606. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1607. if (!t_info)
  1608. t_info = cpe_d.cpe_default_handle;
  1609. if (t_info->tgt)
  1610. rc = t_info->tgt->tgt_set_debug_mode(mode);
  1611. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1612. return rc;
  1613. }
  1614. const struct cpe_svc_hw_cfg *cpe_svc_get_hw_cfg(void *cpe_handle)
  1615. {
  1616. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1617. if (!t_info)
  1618. t_info = cpe_d.cpe_default_handle;
  1619. if (t_info->tgt)
  1620. return t_info->tgt->tgt_get_cpe_info();
  1621. return NULL;
  1622. }
  1623. void *cmi_register(
  1624. void notification_callback(
  1625. const struct cmi_api_notification *parameter),
  1626. u32 service)
  1627. {
  1628. void *reg_handle = NULL;
  1629. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1630. reg_handle = cpe_register_generic(cpe_d.cpe_default_handle,
  1631. NULL,
  1632. notification_callback,
  1633. (CPE_SVC_CMI_MSG | CPE_SVC_OFFLINE |
  1634. CPE_SVC_ONLINE),
  1635. service,
  1636. "CMI_CLIENT");
  1637. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1638. return reg_handle;
  1639. }
  1640. enum cmi_api_result cmi_deregister(void *reg_handle)
  1641. {
  1642. u32 clients = 0;
  1643. struct cpe_notif_node *n = NULL;
  1644. enum cmi_api_result rc = CMI_API_SUCCESS;
  1645. struct cpe_svc_notification payload;
  1646. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1647. rc = (enum cmi_api_result) cpe_deregister_generic(
  1648. cpe_d.cpe_default_handle, reg_handle);
  1649. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  1650. list_for_each_entry(n, &cpe_d.cpe_default_handle->client_list, list) {
  1651. if (n->mask & CPE_SVC_CMI_MSG)
  1652. clients++;
  1653. }
  1654. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  1655. if (clients == 0) {
  1656. payload.event = CPE_SVC_CMI_CLIENTS_DEREG;
  1657. payload.payload = NULL;
  1658. payload.result = CPE_SVC_SUCCESS;
  1659. cpe_broadcast_notification(cpe_d.cpe_default_handle, &payload);
  1660. }
  1661. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1662. return rc;
  1663. }
/*
 * cmi_send_msg - copy a CMI message and queue it to the service thread.
 * @message: CMI message (header followed by payload); the header is
 *           located with CMI_GET_HEADER().
 *
 * The message is duplicated into a freshly allocated cpe_send_msg so
 * the caller's buffer may be reused immediately. On successful
 * queueing, ownership of the copy passes to the worker thread; on
 * queueing failure the copy is freed here.
 *
 * NOTE(review): the allocation-failure paths return CPE_SVC_NO_MEMORY,
 * which is an enum cpe_svc_result value although the declared return
 * type is enum cmi_api_result -- callers presumably only compare
 * against CMI_API_SUCCESS; verify the enum values do not collide.
 */
enum cmi_api_result cmi_send_msg(void *message)
{
	enum cmi_api_result rc = CMI_API_SUCCESS;
	struct cpe_send_msg *msg = NULL;
	struct cmi_hdr *hdr;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	hdr = CMI_GET_HEADER(message);
	/*
	 * GFP_ATOMIC so the allocation never sleeps while the API mutex
	 * is held -- NOTE(review): confirm whether any caller is in
	 * atomic context; otherwise GFP_KERNEL would suffice.
	 */
	msg = kzalloc(sizeof(struct cpe_send_msg),
		      GFP_ATOMIC);
	if (!msg) {
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		return CPE_SVC_NO_MEMORY;
	}

	/* Record whether the payload travels out-of-band (OBM flag) */
	if (CMI_HDR_GET_OBM_FLAG(hdr) == CMI_OBM_FLAG_OUT_BAND)
		msg->isobm = 1;
	else
		msg->isobm = 0;

	/* Total size = header + header-declared payload length */
	msg->size = sizeof(struct cmi_hdr) +
			CMI_HDR_GET_PAYLOAD_SIZE(hdr);
	msg->payload = kzalloc(msg->size, GFP_ATOMIC);
	if (!msg->payload) {
		kfree(msg);
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		return CPE_SVC_NO_MEMORY;
	}
	msg->address = 0;
	memcpy((void *)msg->payload, message, msg->size);

	rc = (enum cmi_api_result) cpe_send_cmd_to_thread(
			cpe_d.cpe_default_handle,
			CPE_CMD_SEND_MSG,
			(void *)msg, false);
	if (rc != 0) {
		/* Queueing failed: ownership stays here, free the copy */
		pr_err("%s: Failed to queue message\n", __func__);
		kfree(msg->payload);
		kfree(msg);
	}

	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	return rc;
}
  1703. enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status)
  1704. {
  1705. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1706. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1707. struct cpe_svc_mem_segment backup_seg;
  1708. struct cpe_svc_mem_segment waiti_seg;
  1709. u8 *backup_data = NULL;
  1710. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1711. if (!t_info)
  1712. t_info = cpe_d.cpe_default_handle;
  1713. rc = cpe_is_command_valid(t_info, CPE_CMD_FTM_TEST);
  1714. if (rc != CPE_SVC_SUCCESS) {
  1715. pr_err("%s: cmd validation fail, cmd = %d\n",
  1716. __func__, CPE_CMD_FTM_TEST);
  1717. goto fail_cmd;
  1718. }
  1719. if (t_info && t_info->tgt) {
  1720. backup_data = kzalloc(
  1721. t_info->tgt->tgt_waiti_info->tgt_waiti_size,
  1722. GFP_KERNEL);
  1723. /* CPE reset */
  1724. rc = t_info->tgt->tgt_reset();
  1725. if (rc != CPE_SVC_SUCCESS) {
  1726. pr_err("%s: CPE reset fail! err = %d\n",
  1727. __func__, rc);
  1728. goto err_return;
  1729. }
  1730. /* Back up the 4 byte IRAM data first */
  1731. backup_seg.type = CPE_SVC_INSTRUCTION_MEM;
  1732. backup_seg.cpe_addr =
  1733. t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
  1734. backup_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
  1735. backup_seg.data = backup_data;
  1736. pr_debug("%s: Backing up IRAM data from CPE\n",
  1737. __func__);
  1738. rc = t_info->tgt->tgt_read_ram(t_info, &backup_seg);
  1739. if (rc != CPE_SVC_SUCCESS) {
  1740. pr_err("%s: Fail to backup CPE IRAM data, err = %d\n",
  1741. __func__, rc);
  1742. goto err_return;
  1743. }
  1744. pr_debug("%s: Complete backing up IRAM data from CPE\n",
  1745. __func__);
  1746. /* Write the WAITI instruction data */
  1747. waiti_seg.type = CPE_SVC_INSTRUCTION_MEM;
  1748. waiti_seg.cpe_addr =
  1749. t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
  1750. waiti_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
  1751. waiti_seg.data = t_info->tgt->tgt_waiti_info->tgt_waiti_data;
  1752. rc = t_info->tgt->tgt_write_ram(t_info, &waiti_seg);
  1753. if (rc != CPE_SVC_SUCCESS) {
  1754. pr_err("%s: Fail to write the WAITI data, err = %d\n",
  1755. __func__, rc);
  1756. goto restore_iram;
  1757. }
  1758. /* Boot up cpe to execute the WAITI instructions */
  1759. rc = t_info->tgt->tgt_boot(1);
  1760. if (rc != CPE_SVC_SUCCESS) {
  1761. pr_err("%s: Fail to boot CPE, err = %d\n",
  1762. __func__, rc);
  1763. goto reset;
  1764. }
  1765. /*
  1766. * 1ms delay is suggested by the hw team to
  1767. * wait for cpe to boot up.
  1768. */
  1769. usleep_range(1000, 1100);
  1770. /* Check if the cpe init is done after executing the WAITI */
  1771. *status = t_info->tgt->tgt_cpar_init_done();
  1772. reset:
  1773. /* Set the cpe back to reset state */
  1774. rc = t_info->tgt->tgt_reset();
  1775. if (rc != CPE_SVC_SUCCESS) {
  1776. pr_err("%s: CPE reset fail! err = %d\n",
  1777. __func__, rc);
  1778. goto restore_iram;
  1779. }
  1780. restore_iram:
  1781. /* Restore the IRAM 4 bytes data */
  1782. rc = t_info->tgt->tgt_write_ram(t_info, &backup_seg);
  1783. if (rc != CPE_SVC_SUCCESS) {
  1784. pr_err("%s: Fail to restore the IRAM data, err = %d\n",
  1785. __func__, rc);
  1786. goto err_return;
  1787. }
  1788. }
  1789. err_return:
  1790. kfree(backup_data);
  1791. fail_cmd:
  1792. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1793. return rc;
  1794. }
/* Stub: TomTom target boot is a no-op in this implementation. */
static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: always reports CPAR init not done for TomTom. */
static u32 cpe_tgt_tomtom_is_cpar_init_done(void)
{
	return 0;
}
/* Stub: always reports the TomTom CPE as inactive. */
static u32 cpe_tgt_tomtom_is_active(void)
{
	return 0;
}
/* Stub: TomTom target reset is a no-op in this implementation. */
static enum cpe_svc_result cpe_tgt_tomtom_reset(void)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: TomTom voice-TX (LAB) toggle is a no-op in this implementation. */
enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable)
{
	return CPE_SVC_SUCCESS;
}
  1815. enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable)
  1816. {
  1817. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1818. if (!t_info)
  1819. t_info = cpe_d.cpe_default_handle;
  1820. if (t_info->tgt)
  1821. return t_info->tgt->tgt_voice_tx_lab(enable);
  1822. else
  1823. return CPE_SVC_INVALID_HANDLE;
  1824. }
/* Stub: TomTom mailbox read is a no-op; buffer is left untouched. */
static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer,
	size_t size)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: TomTom mailbox write is a no-op in this implementation. */
static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer,
	size_t size)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: TomTom RAM read is a no-op; mem_seg->data is left untouched. */
static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info,
	struct cpe_svc_mem_segment *mem_seg)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: TomTom RAM write is a no-op in this implementation. */
static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info,
	const struct cpe_svc_mem_segment *mem_seg)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: TomTom notification routing is a no-op in this implementation. */
static enum cpe_svc_result cpe_tgt_tomtom_route_notification(
	enum cpe_svc_module module,
	enum cpe_svc_route_dest dest)
{
	return CPE_SVC_SUCCESS;
}
/* Stub: TomTom debug-mode setting is a no-op in this implementation. */
static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable)
{
	return CPE_SVC_SUCCESS;
}
/* Return the static hardware configuration for the TomTom codec. */
static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void)
{
	return &cpe_svc_tomtom_info;
}
  1859. static enum cpe_svc_result cpe_tgt_tomtom_deinit(
  1860. struct cpe_svc_tgt_abstraction *param)
  1861. {
  1862. kfree(param->inbox);
  1863. param->inbox = NULL;
  1864. kfree(param->outbox);
  1865. param->outbox = NULL;
  1866. memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
  1867. return CPE_SVC_SUCCESS;
  1868. }
/* WAITI instruction bytes written into CPE IRAM for the FTM boot test */
static u8 cpe_tgt_tomtom_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

static struct cpe_tgt_waiti_info cpe_tgt_tomtom_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_tomtom_waiti_data),
	.tgt_waiti_data = cpe_tgt_tomtom_waiti_data,
};
  1874. static enum cpe_svc_result cpe_tgt_tomtom_init(
  1875. struct cpe_svc_codec_info_v1 *codec_info,
  1876. struct cpe_svc_tgt_abstraction *param)
  1877. {
  1878. if (!codec_info)
  1879. return CPE_SVC_INVALID_HANDLE;
  1880. if (!param)
  1881. return CPE_SVC_INVALID_HANDLE;
  1882. if (codec_info->id == CPE_SVC_CODEC_TOMTOM) {
  1883. param->tgt_boot = cpe_tgt_tomtom_boot;
  1884. param->tgt_cpar_init_done = cpe_tgt_tomtom_is_cpar_init_done;
  1885. param->tgt_is_active = cpe_tgt_tomtom_is_active;
  1886. param->tgt_reset = cpe_tgt_tomtom_reset;
  1887. param->tgt_read_mailbox = cpe_tgt_tomtom_read_mailbox;
  1888. param->tgt_write_mailbox = cpe_tgt_tomtom_write_mailbox;
  1889. param->tgt_read_ram = cpe_tgt_tomtom_read_RAM;
  1890. param->tgt_write_ram = cpe_tgt_tomtom_write_RAM;
  1891. param->tgt_route_notification =
  1892. cpe_tgt_tomtom_route_notification;
  1893. param->tgt_set_debug_mode = cpe_tgt_tomtom_set_debug_mode;
  1894. param->tgt_get_cpe_info = cpe_tgt_tomtom_get_cpe_info;
  1895. param->tgt_deinit = cpe_tgt_tomtom_deinit;
  1896. param->tgt_voice_tx_lab = cpe_tgt_tomtom_voicetx;
  1897. param->tgt_waiti_info = &cpe_tgt_tomtom_waiti_info;
  1898. param->inbox = kzalloc(TOMTOM_A_SVASS_SPE_INBOX_SIZE,
  1899. GFP_KERNEL);
  1900. if (!param->inbox)
  1901. return CPE_SVC_NO_MEMORY;
  1902. param->outbox = kzalloc(TOMTOM_A_SVASS_SPE_OUTBOX_SIZE,
  1903. GFP_KERNEL);
  1904. if (!param->outbox) {
  1905. kfree(param->inbox);
  1906. return CPE_SVC_NO_MEMORY;
  1907. }
  1908. }
  1909. return CPE_SVC_SUCCESS;
  1910. }
/*
 * cpe_tgt_wcd9335_boot - boot the WCD9335 CPE core.
 * @debug_mode: non-zero leaves the watchdog disabled so the core can be
 *              halted in a debugger.
 *
 * Register writes are order-dependent: watchdog config, MAD buffer-ready
 * interrupt period, then the CPAR control bits that release the core.
 * Errors are OR-accumulated and collapsed into CPE_SVC_FAILED.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_boot(int debug_mode)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	if (!debug_mode)
		rc |= cpe_update_bits(
				WCD9335_CPE_SS_WDOG_CFG,
				0x3f, 0x31);
	else
		pr_info("%s: CPE in debug mode, WDOG disabled\n",
			__func__);

	rc |= cpe_register_write(WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD, 19);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x00);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x02, 0x02);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x01, 0x01);

	if (unlikely(rc)) {
		pr_err("%s: Failed to boot, err = %d\n",
		       __func__, rc);
		rc = CPE_SVC_FAILED;
	}

	return rc;
}
  1932. static u32 cpe_tgt_wcd9335_is_cpar_init_done(void)
  1933. {
  1934. u8 temp = 0;
  1935. cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
  1936. return temp & 0x1;
  1937. }
  1938. static u32 cpe_tgt_wcd9335_is_active(void)
  1939. {
  1940. u8 temp = 0;
  1941. cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
  1942. return temp & 0x4;
  1943. }
/*
 * cpe_tgt_wcd9335_reset - hold the WCD9335 CPE core in reset.
 *
 * Disables CPAR, powers down IRAM and both DRAM banks, then asserts the
 * CPAR reset bit. The register sequence is order-dependent; errors are
 * OR-accumulated and collapsed into CPE_SVC_FAILED.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_reset(void)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CFG, 0x01, 0x00);

	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2, 0x00);

	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x04);

	if (unlikely(rc)) {
		pr_err("%s: failed to reset cpe, err = %d\n",
		       __func__, rc);
		rc = CPE_SVC_FAILED;
	}

	return rc;
}
  1964. static enum cpe_svc_result cpe_tgt_wcd9335_read_mailbox(u8 *buffer,
  1965. size_t size)
  1966. {
  1967. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1968. u32 cnt = 0;
  1969. pr_debug("%s: size=%zd\n", __func__, size);
  1970. if (size > WCD9335_CPE_SS_SPE_OUTBOX_SIZE)
  1971. size = WCD9335_CPE_SS_SPE_OUTBOX_SIZE;
  1972. for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++)
  1973. rc = cpe_register_read(WCD9335_CPE_SS_SPE_OUTBOX1(cnt),
  1974. &buffer[cnt]);
  1975. rc = cpe_register_write(WCD9335_CPE_SS_OUTBOX1_ACK, 0x01);
  1976. if (unlikely(rc)) {
  1977. pr_err("%s: failed to ACK outbox, err = %d\n",
  1978. __func__, rc);
  1979. rc = CPE_SVC_FAILED;
  1980. }
  1981. return rc;
  1982. }
  1983. static enum cpe_svc_result cpe_tgt_wcd9335_write_mailbox(u8 *buffer,
  1984. size_t size)
  1985. {
  1986. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1987. u32 cnt = 0;
  1988. pr_debug("%s: size = %zd\n", __func__, size);
  1989. if (size > WCD9335_CPE_SS_SPE_INBOX_SIZE)
  1990. size = WCD9335_CPE_SS_SPE_INBOX_SIZE;
  1991. for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
  1992. rc |= cpe_register_write(WCD9335_CPE_SS_SPE_INBOX1(cnt),
  1993. buffer[cnt]);
  1994. }
  1995. if (unlikely(rc)) {
  1996. pr_err("%s: Error %d writing mailbox registers\n",
  1997. __func__, rc);
  1998. return rc;
  1999. }
  2000. rc = cpe_register_write(WCD9335_CPE_SS_INBOX1_TRG, 1);
  2001. return rc;
  2002. }
  2003. static enum cpe_svc_result cpe_wcd9335_get_mem_addr(struct cpe_info *t_info,
  2004. const struct cpe_svc_mem_segment *mem_seg,
  2005. u32 *addr, u8 *mem)
  2006. {
  2007. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2008. u32 offset, mem_sz, address;
  2009. u8 mem_type;
  2010. switch (mem_seg->type) {
  2011. case CPE_SVC_DATA_MEM:
  2012. mem_type = MEM_ACCESS_DRAM_VAL;
  2013. offset = WCD9335_CPE_SS_SPE_DRAM_OFFSET;
  2014. mem_sz = WCD9335_CPE_SS_SPE_DRAM_SIZE;
  2015. break;
  2016. case CPE_SVC_INSTRUCTION_MEM:
  2017. mem_type = MEM_ACCESS_IRAM_VAL;
  2018. offset = WCD9335_CPE_SS_SPE_IRAM_OFFSET;
  2019. mem_sz = WCD9335_CPE_SS_SPE_IRAM_SIZE;
  2020. break;
  2021. default:
  2022. pr_err("%s: Invalid mem type = %u\n",
  2023. __func__, mem_seg->type);
  2024. return CPE_SVC_INVALID_HANDLE;
  2025. }
  2026. if (mem_seg->cpe_addr < offset) {
  2027. pr_err("%s: Invalid addr %x for mem type %u\n",
  2028. __func__, mem_seg->cpe_addr, mem_type);
  2029. return CPE_SVC_INVALID_HANDLE;
  2030. }
  2031. address = mem_seg->cpe_addr - offset;
  2032. if (address + mem_seg->size > mem_sz) {
  2033. pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
  2034. __func__, mem_seg->size, address, mem_type);
  2035. return CPE_SVC_INVALID_HANDLE;
  2036. }
  2037. (*addr) = address;
  2038. (*mem) = mem_type;
  2039. return rc;
  2040. }
  2041. static enum cpe_svc_result cpe_tgt_wcd9335_read_RAM(struct cpe_info *t_info,
  2042. struct cpe_svc_mem_segment *mem_seg)
  2043. {
  2044. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2045. u8 temp = 0;
  2046. u32 cnt = 0;
  2047. u8 mem = 0x0;
  2048. u32 addr = 0;
  2049. u32 lastaddr = 0;
  2050. u32 ptr_update = true;
  2051. bool autoinc;
  2052. if (!mem_seg) {
  2053. pr_err("%s: Invalid buffer\n", __func__);
  2054. return CPE_SVC_INVALID_HANDLE;
  2055. }
  2056. rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
  2057. if (rc != CPE_SVC_SUCCESS) {
  2058. pr_err("%s: Cannot obtain address, mem_type %u\n",
  2059. __func__, mem_seg->type);
  2060. return rc;
  2061. }
  2062. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2063. autoinc = cpe_register_read_autoinc_supported();
  2064. if (autoinc)
  2065. temp = 0x18;
  2066. else
  2067. temp = 0x10;
  2068. temp |= mem;
  2069. lastaddr = ~addr;
  2070. do {
  2071. if (!autoinc || (ptr_update)) {
  2072. /* write LSB only if modified */
  2073. if ((lastaddr & 0xFF) != (addr & 0xFF))
  2074. rc |= cpe_register_write(
  2075. WCD9335_CPE_SS_MEM_PTR_0,
  2076. (addr & 0xFF));
  2077. /* write middle byte only if modified */
  2078. if (((lastaddr >> 8) & 0xFF) != ((addr >> 8) & 0xFF))
  2079. rc |= cpe_register_write(
  2080. WCD9335_CPE_SS_MEM_PTR_1,
  2081. ((addr>>8) & 0xFF));
  2082. /* write MSB only if modified */
  2083. if (((lastaddr >> 16) & 0xFF) != ((addr >> 16) & 0xFF))
  2084. rc |= cpe_register_write(
  2085. WCD9335_CPE_SS_MEM_PTR_2,
  2086. ((addr>>16) & 0xFF));
  2087. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, temp);
  2088. lastaddr = addr;
  2089. addr++;
  2090. ptr_update = false;
  2091. }
  2092. rc |= cpe_register_read(WCD9335_CPE_SS_MEM_BANK_0,
  2093. &mem_seg->data[cnt]);
  2094. if (!autoinc)
  2095. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2096. } while ((++cnt < mem_seg->size) ||
  2097. (rc != CPE_SVC_SUCCESS));
  2098. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2099. if (rc)
  2100. pr_err("%s: Failed to read registers, err = %d\n",
  2101. __func__, rc);
  2102. return rc;
  2103. }
  2104. static enum cpe_svc_result cpe_tgt_wcd9335_write_RAM(struct cpe_info *t_info,
  2105. const struct cpe_svc_mem_segment *mem_seg)
  2106. {
  2107. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2108. u8 mem_reg_val = 0;
  2109. u8 mem = MEM_ACCESS_NONE_VAL;
  2110. u32 addr = 0;
  2111. u8 *temp_ptr = NULL;
  2112. u32 temp_size = 0;
  2113. bool autoinc;
  2114. if (!mem_seg) {
  2115. pr_err("%s: Invalid mem segment\n",
  2116. __func__);
  2117. return CPE_SVC_INVALID_HANDLE;
  2118. }
  2119. rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
  2120. if (rc != CPE_SVC_SUCCESS) {
  2121. pr_err("%s: Cannot obtain address, mem_type %u\n",
  2122. __func__, mem_seg->type);
  2123. return rc;
  2124. }
  2125. autoinc = cpe_register_read_autoinc_supported();
  2126. if (autoinc)
  2127. mem_reg_val = 0x18;
  2128. else
  2129. mem_reg_val = 0x10;
  2130. mem_reg_val |= mem;
  2131. rc = cpe_update_bits(WCD9335_CPE_SS_MEM_CTRL,
  2132. 0x0F, mem_reg_val);
  2133. rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_0,
  2134. (addr & 0xFF));
  2135. rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_1,
  2136. ((addr >> 8) & 0xFF));
  2137. rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_2,
  2138. ((addr >> 16) & 0xFF));
  2139. temp_size = 0;
  2140. temp_ptr = mem_seg->data;
  2141. while (temp_size <= mem_seg->size) {
  2142. u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
  2143. ? CHUNK_SIZE : (mem_seg->size - temp_size);
  2144. if (t_info->state == CPE_STATE_OFFLINE) {
  2145. pr_err("%s: CPE is offline\n", __func__);
  2146. return CPE_SVC_FAILED;
  2147. }
  2148. cpe_register_write_repeat(WCD9335_CPE_SS_MEM_BANK_0,
  2149. temp_ptr, to_write);
  2150. temp_size += CHUNK_SIZE;
  2151. temp_ptr += CHUNK_SIZE;
  2152. }
  2153. rc = cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2154. if (rc)
  2155. pr_err("%s: Failed to write registers, err = %d\n",
  2156. __func__, rc);
  2157. return rc;
  2158. }
  2159. static enum cpe_svc_result cpe_tgt_wcd9335_route_notification(
  2160. enum cpe_svc_module module,
  2161. enum cpe_svc_route_dest dest)
  2162. {
  2163. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2164. pr_debug("%s: Module = %d, Destination = %d\n",
  2165. __func__, module, dest);
  2166. switch (module) {
  2167. case CPE_SVC_LISTEN_PROC:
  2168. switch (dest) {
  2169. case CPE_SVC_EXTERNAL:
  2170. rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x01);
  2171. break;
  2172. case CPE_SVC_INTERNAL:
  2173. rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x00);
  2174. break;
  2175. default:
  2176. pr_err("%s: Invalid destination %d\n",
  2177. __func__, dest);
  2178. return CPE_SVC_FAILED;
  2179. }
  2180. break;
  2181. default:
  2182. pr_err("%s: Invalid module %d\n",
  2183. __func__, module);
  2184. rc = CPE_SVC_FAILED;
  2185. break;
  2186. }
  2187. return rc;
  2188. }
/* Stub: WCD9335 debug-mode change only logs the request; no hw access. */
static enum cpe_svc_result cpe_tgt_wcd9335_set_debug_mode(u32 enable)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	pr_debug("%s: enable = %s\n", __func__,
		 (enable) ? "true" : "false");
	return rc;
}
/* Return the static hardware configuration for the WCD9335 codec. */
static const struct cpe_svc_hw_cfg *cpe_tgt_wcd9335_get_cpe_info(void)
{
	return &cpe_svc_wcd9335_info;
}
  2200. static enum cpe_svc_result
  2201. cpe_tgt_wcd9335_deinit(struct cpe_svc_tgt_abstraction *param)
  2202. {
  2203. kfree(param->inbox);
  2204. param->inbox = NULL;
  2205. kfree(param->outbox);
  2206. param->outbox = NULL;
  2207. memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
  2208. return CPE_SVC_SUCCESS;
  2209. }
  2210. static enum cpe_svc_result
  2211. cpe_tgt_wcd9335_voicetx(bool enable)
  2212. {
  2213. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2214. u8 val = 0;
  2215. pr_debug("%s: enable = %u\n", __func__, enable);
  2216. if (enable)
  2217. val = 0x02;
  2218. else
  2219. val = 0x00;
  2220. rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x02, val);
  2221. val = 0;
  2222. cpe_register_read(WCD9335_CPE_SS_CFG, &val);
  2223. return rc;
  2224. }
/* WAITI instruction bytes written into CPE IRAM for the FTM boot test */
static u8 cpe_tgt_wcd9335_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

static struct cpe_tgt_waiti_info cpe_tgt_wcd9335_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_wcd9335_waiti_data),
	.tgt_waiti_data = cpe_tgt_wcd9335_waiti_data,
};
  2230. static enum cpe_svc_result cpe_tgt_wcd9335_init(
  2231. struct cpe_svc_codec_info_v1 *codec_info,
  2232. struct cpe_svc_tgt_abstraction *param)
  2233. {
  2234. if (!codec_info)
  2235. return CPE_SVC_INVALID_HANDLE;
  2236. if (!param)
  2237. return CPE_SVC_INVALID_HANDLE;
  2238. if (codec_info->id == CPE_SVC_CODEC_WCD9335) {
  2239. param->tgt_boot = cpe_tgt_wcd9335_boot;
  2240. param->tgt_cpar_init_done = cpe_tgt_wcd9335_is_cpar_init_done;
  2241. param->tgt_is_active = cpe_tgt_wcd9335_is_active;
  2242. param->tgt_reset = cpe_tgt_wcd9335_reset;
  2243. param->tgt_read_mailbox = cpe_tgt_wcd9335_read_mailbox;
  2244. param->tgt_write_mailbox = cpe_tgt_wcd9335_write_mailbox;
  2245. param->tgt_read_ram = cpe_tgt_wcd9335_read_RAM;
  2246. param->tgt_write_ram = cpe_tgt_wcd9335_write_RAM;
  2247. param->tgt_route_notification =
  2248. cpe_tgt_wcd9335_route_notification;
  2249. param->tgt_set_debug_mode = cpe_tgt_wcd9335_set_debug_mode;
  2250. param->tgt_get_cpe_info = cpe_tgt_wcd9335_get_cpe_info;
  2251. param->tgt_deinit = cpe_tgt_wcd9335_deinit;
  2252. param->tgt_voice_tx_lab = cpe_tgt_wcd9335_voicetx;
  2253. param->tgt_waiti_info = &cpe_tgt_wcd9335_waiti_info;
  2254. param->inbox = kzalloc(WCD9335_CPE_SS_SPE_INBOX_SIZE,
  2255. GFP_KERNEL);
  2256. if (!param->inbox)
  2257. return CPE_SVC_NO_MEMORY;
  2258. param->outbox = kzalloc(WCD9335_CPE_SS_SPE_OUTBOX_SIZE,
  2259. GFP_KERNEL);
  2260. if (!param->outbox) {
  2261. kfree(param->inbox);
  2262. return CPE_SVC_NO_MEMORY;
  2263. }
  2264. }
  2265. return CPE_SVC_SUCCESS;
  2266. }
  2267. MODULE_DESCRIPTION("WCD CPE Services");
  2268. MODULE_LICENSE("GPL v2");