wcd_cpe_services.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727
  1. /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <sound/soc.h>
#include "wcd9335_registers.h"
#include "core.h"
#include "cpe_cmi.h"
#include "wcd_cpe_services.h"
#include "wcd_cmi_api.h"
/* CMI messaging parameters */
#define CPE_MSG_BUFFER_SIZE 132
#define CPE_NO_SERVICE 0
#define CMI_DRIVER_SUPPORTED_VERSION 0
#define CMI_API_SUCCESS 0
#define CMI_MSG_TRANSPORT (0x0002)
/* Max polls while waiting for the CPE to leave the active state */
#define CPE_SVC_INACTIVE_STATE_RETRIES_MAX 10
/* TomTom CPE memory map: DRAM/IRAM windows and mailbox sizes (bytes) */
#define TOMTOM_A_SVASS_SPE_DRAM_OFFSET 0x50000
#define TOMTOM_A_SVASS_SPE_DRAM_SIZE 0x30000
#define TOMTOM_A_SVASS_SPE_IRAM_OFFSET 0x80000
#define TOMTOM_A_SVASS_SPE_IRAM_SIZE 0xC000
#define TOMTOM_A_SVASS_SPE_INBOX_SIZE 12
#define TOMTOM_A_SVASS_SPE_OUTBOX_SIZE 12
/* Memory access window selector values */
#define MEM_ACCESS_NONE_VAL 0x0
#define MEM_ACCESS_IRAM_VAL 0x1
#define MEM_ACCESS_DRAM_VAL 0x2
/* Listen control owner: SPE (CPE core) vs MSM (host) */
#define LISTEN_CTL_SPE_VAL 0x0
#define LISTEN_CTL_MSM_VAL 0x1
/* WCD9335 CPE memory map: DRAM/IRAM windows and mailbox sizes (bytes) */
#define WCD9335_CPE_SS_SPE_DRAM_OFFSET 0x48000
#define WCD9335_CPE_SS_SPE_DRAM_SIZE 0x34000
#define WCD9335_CPE_SS_SPE_IRAM_OFFSET 0x80000
#define WCD9335_CPE_SS_SPE_IRAM_SIZE 0x20000
#define WCD9335_CPE_SS_SPE_INBOX_SIZE 16
#define WCD9335_CPE_SS_SPE_OUTBOX_SIZE 16
#define WCD9335_CPE_SS_SPE_MEM_BANK_SIZ 16
/* Nth byte register of the inbox/outbox/mem-bank register windows */
#define WCD9335_CPE_SS_SPE_INBOX1(N) (WCD9335_CPE_SS_INBOX1_0 + (N))
#define WCD9335_CPE_SS_SPE_OUTBOX1(N) (WCD9335_CPE_SS_OUTBOX1_0 + (N))
#define WCD9335_CPE_SS_MEM_BANK(N) (WCD9335_CPE_SS_MEM_BANK_0 + (N))
/* Transfer granularity for RAM reads/writes */
#define CHUNK_SIZE 16
  53. #define CPE_SVC_GRAB_LOCK(lock, name) \
  54. { \
  55. pr_debug("%s: %s lock acquire\n", \
  56. __func__, name); \
  57. mutex_lock(lock); \
  58. }
  59. #define CPE_SVC_REL_LOCK(lock, name) \
  60. { \
  61. pr_debug("%s: %s lock release\n", \
  62. __func__, name); \
  63. mutex_unlock(lock); \
  64. }
/*
 * Per-codec hardware descriptions handed out via tgt_get_cpe_info().
 * Positional initializers: order must match struct cpe_svc_hw_cfg
 * (DRAM size/offset, IRAM size/offset, inbox size, outbox size) —
 * NOTE(review): field names live in the header; confirm ordering there.
 */
static const struct cpe_svc_hw_cfg cpe_svc_tomtom_info = {
	TOMTOM_A_SVASS_SPE_DRAM_SIZE,
	TOMTOM_A_SVASS_SPE_DRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_IRAM_SIZE,
	TOMTOM_A_SVASS_SPE_IRAM_OFFSET,
	TOMTOM_A_SVASS_SPE_INBOX_SIZE,
	TOMTOM_A_SVASS_SPE_OUTBOX_SIZE
};

static const struct cpe_svc_hw_cfg cpe_svc_wcd9335_info = {
	WCD9335_CPE_SS_SPE_DRAM_SIZE,
	WCD9335_CPE_SS_SPE_DRAM_OFFSET,
	WCD9335_CPE_SS_SPE_IRAM_SIZE,
	WCD9335_CPE_SS_SPE_IRAM_OFFSET,
	WCD9335_CPE_SS_SPE_INBOX_SIZE,
	WCD9335_CPE_SS_SPE_OUTBOX_SIZE
};
/* Top-level state of the CPE service state machine */
enum cpe_state {
	CPE_STATE_UNINITIALIZED = 0,
	CPE_STATE_INITIALIZED,
	CPE_STATE_IDLE,
	CPE_STATE_DOWNLOADING,	/* firmware segment download in progress */
	CPE_STATE_BOOTING,
	CPE_STATE_SENDING_MSG,	/* CMI message in flight to the CPE */
	CPE_STATE_OFFLINE,
	CPE_STATE_BUFFERING,	/* LAB (look-ahead buffer) data streaming */
	CPE_STATE_BUFFERING_CANCELLED
};

/* Sub-state refining the state above (message, download and boot phases) */
enum cpe_substate {
	CPE_SS_IDLE = 0,
	CPE_SS_MSG_REQUEST_ACCESS,
	CPE_SS_MSG_SEND_INBOX,
	CPE_SS_MSG_SENT,
	CPE_SS_DL_DOWNLOADING,
	CPE_SS_DL_COMPLETED,
	CPE_SS_BOOT,
	CPE_SS_BOOT_INIT,
	CPE_SS_ONLINE
};

/* Commands queued to the worker thread via cpe_send_cmd_to_thread() */
enum cpe_command {
	CPE_CMD_KILL_THREAD = 0,
	CPE_CMD_BOOT,
	CPE_CMD_BOOT_INITIALIZE,
	CPE_CMD_BOOT_COMPLETE,
	CPE_CMD_SEND_MSG,
	CPE_CMD_SEND_TRANS_MSG,
	CPE_CMD_SEND_MSG_COMPLETE,
	CPE_CMD_PROCESS_IRQ,
	CPE_CMD_RAMDUMP,
	CPE_CMD_DL_SEGMENT,
	CPE_CMD_SHUTDOWN,
	CPE_CMD_RESET,
	CPE_CMD_DEINITIALIZE,
	CPE_CMD_READ,
	CPE_CMD_ENABLE_LAB,
	CPE_CMD_DISABLE_LAB,
	CPE_CMD_SWAP_BUFFER,
	CPE_LAB_CFG_SB,
	CPE_CMD_CANCEL_MEMACCESS,
	CPE_CMD_PROC_INCOMING_MSG,
	CPE_CMD_FTM_TEST,
};

/* Result of processing one queued command (see cpe_cmd_received()) */
enum cpe_process_result {
	CPE_PROC_SUCCESS = 0,
	CPE_PROC_FAILED,
	CPE_PROC_KILLED,	/* worker thread was asked to terminate */
	CPE_PROC_QUEUED,	/* command deferred; node is re-queued */
};
/* One queued command; lives on cpe_info.main_queue, freed after processing */
struct cpe_command_node {
	enum cpe_command command;
	enum cpe_svc_result result;
	void *data;		/* command-specific payload (ownership varies) */
	struct list_head list;
};

/* Per-instance CPE service context; one per codec */
struct cpe_info {
	struct list_head main_queue;		/* pending cpe_command_node, msg_lock held */
	struct completion cmd_complete;		/* signalled when work is queued */
	struct completion thread_comp;		/* signalled when worker exits */
	void *thread_handler;			/* kthread task_struct, opaque here */
	bool stop_thread;			/* set under msg_lock to stop worker */
	struct mutex msg_lock;			/* protects queue and state flags */
	enum cpe_state state;
	enum cpe_substate substate;
	struct list_head client_list;		/* cpe_notif_node registrations */
	/* Callbacks selected per codec target */
	enum cpe_process_result (*cpe_process_command)
			(struct cpe_command_node *command_node);
	enum cpe_svc_result (*cpe_cmd_validate)
		(const struct cpe_info *i,
		 enum cpe_command command);
	enum cpe_svc_result (*cpe_start_notification)
		(struct cpe_info *i);
	u32 initialized;
	struct cpe_svc_tgt_abstraction *tgt;	/* hardware abstraction ops */
	void *pending;
	void *data;
	void *client_context;
	u32 codec_id;
	struct work_struct clk_plan_work;
	struct completion core_svc_cmd_compl;
};

/* "Wait indication" byte pattern the target emits when ready */
struct cpe_tgt_waiti_info {
	u8 tgt_waiti_size;
	u8 *tgt_waiti_data;
};
/*
 * Hardware abstraction for a specific codec target (TomTom or WCD9335).
 * Populated by cpe_tgt_*_init(); all CPE register/RAM/mailbox traffic
 * goes through these ops so the core logic stays codec-agnostic.
 */
struct cpe_svc_tgt_abstraction {
	enum cpe_svc_result (*tgt_boot)(int debug_mode);
	u32 (*tgt_cpar_init_done)(void);
	u32 (*tgt_is_active)(void);
	enum cpe_svc_result (*tgt_reset)(void);
	enum cpe_svc_result (*tgt_stop)(void);
	enum cpe_svc_result (*tgt_read_mailbox)
				(u8 *buffer, size_t size);
	enum cpe_svc_result (*tgt_write_mailbox)
				(u8 *buffer, size_t size);
	enum cpe_svc_result (*tgt_read_ram)
				(struct cpe_info *c,
				 struct cpe_svc_mem_segment *data);
	enum cpe_svc_result (*tgt_write_ram)
				(struct cpe_info *c,
				 const struct cpe_svc_mem_segment *data);
	enum cpe_svc_result (*tgt_route_notification)
				(enum cpe_svc_module module,
				 enum cpe_svc_route_dest dest);
	enum cpe_svc_result (*tgt_set_debug_mode)(u32 enable);
	const struct cpe_svc_hw_cfg *(*tgt_get_cpe_info)(void);
	enum cpe_svc_result (*tgt_deinit)
				(struct cpe_svc_tgt_abstraction *param);
	enum cpe_svc_result (*tgt_voice_tx_lab)
				(bool);
	u8 *inbox;	/* staging buffer sized to the codec's inbox */
	u8 *outbox;	/* staging buffer sized to the codec's outbox */
	struct cpe_tgt_waiti_info *tgt_waiti_info;
};
/* Per-codec initializers that fill in a cpe_svc_tgt_abstraction */
static enum cpe_svc_result cpe_tgt_tomtom_init(
	struct cpe_svc_codec_info_v1 *codec_info,
	struct cpe_svc_tgt_abstraction *param);

static enum cpe_svc_result cpe_tgt_wcd9335_init(
	struct cpe_svc_codec_info_v1 *codec_info,
	struct cpe_svc_tgt_abstraction *param);

/* An outbound CMI message queued for delivery to the CPE inbox */
struct cpe_send_msg {
	u8 *payload;
	u32 isobm;	/* non-zero when the payload carries an OBM segment */
	u32 address;	/* CPE shared-memory address, 0 for in-band send */
	size_t size;
};
/* Handle returned to clients performing CPE RAM reads */
struct cpe_read_handle {
	void *registration;
	struct cpe_info t_info;
	struct list_head buffers;
	void *config;
};

/* Union of the two client callback flavours (service vs CMI) */
struct generic_notification {
	void (*notification)
		(const struct cpe_svc_notification *parameter);
	void (*cmi_notification)
		(const struct cmi_api_notification *parameter);
};

/* One registered client on cpe_info.client_list */
struct cpe_notif_node {
	struct generic_notification notif;
	u32 mask;	/* event bits the client subscribed to */
	u32 service;	/* CMI service id the client handles */
	const struct cpe_info *context;
	const char *name;
	u32 disabled;	/* when set, plain notifications are suppressed */
	struct list_head list;
};

/* Driver-wide singleton state (see cpe_d below) */
struct cpe_priv {
	struct cpe_info *cpe_default_handle;
	void (*cpe_irq_control_callback)(u32 enable);
	void (*cpe_query_freq_plans_cb)
		(void *cdc_priv,
		 struct cpe_svc_cfg_clk_plan *clk_freq);
	void (*cpe_change_freq_plan_cb)(void *cdc_priv,
			u32 clk_freq);
	u32 cpe_msg_buffer;	/* CPE-side shared-memory message buffer handle */
	void *cpe_cmi_handle;
	struct mutex cpe_api_mutex;	/* serializes public API entry points */
	struct mutex cpe_svc_lock;	/* protects client_list traversal */
	struct cpe_svc_boot_event cpe_debug_vector;
	void *cdc_priv;			/* snd_soc codec private data */
};

static struct cpe_priv cpe_d;

static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle);

static enum cpe_svc_result cpe_is_command_valid(
		const struct cpe_info *t_info,
		enum cpe_command command);
  250. static int cpe_register_read(u32 reg, u8 *val)
  251. {
  252. *(val) = snd_soc_read(cpe_d.cdc_priv, reg);
  253. return 0;
  254. }
  255. static enum cpe_svc_result cpe_update_bits(u32 reg,
  256. u32 mask, u32 value)
  257. {
  258. int ret = 0;
  259. ret = snd_soc_update_bits(cpe_d.cdc_priv, reg,
  260. mask, value);
  261. if (ret < 0)
  262. return CPE_SVC_FAILED;
  263. return CPE_SVC_SUCCESS;
  264. }
/*
 * Write a single codec register.
 * The MEM_BANK_0 register is excluded from tracing because firmware
 * download writes it in a tight loop and would flood the log.
 * NOTE(review): declared int but returns enum cpe_svc_result values,
 * unlike cpe_update_bits() — callers treat 0/non-zero; confirm intent.
 */
static int cpe_register_write(u32 reg, u32 val)
{
	int ret = 0;

	if (reg != WCD9335_CPE_SS_MEM_BANK_0)
		pr_debug("%s: reg = 0x%x, value = 0x%x\n",
			  __func__, reg, val);

	ret = snd_soc_write(cpe_d.cdc_priv, reg, val);
	if (ret < 0)
		return CPE_SVC_FAILED;

	return CPE_SVC_SUCCESS;
}
  276. static int cpe_register_write_repeat(u32 reg, u8 *ptr, u32 to_write)
  277. {
  278. struct snd_soc_codec *codec = cpe_d.cdc_priv;
  279. struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
  280. int ret = 0;
  281. ret = wcd9xxx_slim_write_repeat(wcd9xxx, reg, to_write, ptr);
  282. if (ret != 0)
  283. pr_err("%s: slim_write_repeat failed\n", __func__);
  284. if (ret < 0)
  285. return CPE_SVC_FAILED;
  286. return CPE_SVC_SUCCESS;
  287. }
  288. static bool cpe_register_read_autoinc_supported(void)
  289. {
  290. return true;
  291. }
/* Called under msgq locked context */
/*
 * cpe_cmd_received - drain and dispatch the command queue
 * @t_info: CPE instance whose main_queue is processed
 *
 * Pops nodes off main_queue and runs each through cpe_process_command()
 * until the queue is empty or a command stops returning success.
 * Caller must hold t_info->msg_lock.
 */
static void cpe_cmd_received(struct cpe_info *t_info)
{
	struct cpe_command_node *node = NULL;
	enum cpe_process_result proc_rc = CPE_PROC_SUCCESS;

	if (!t_info) {
		pr_err("%s: Invalid thread info\n",
			__func__);
		return;
	}

	while (!list_empty(&t_info->main_queue)) {
		/* Stop draining after a failed/killed/queued command */
		if (proc_rc != CPE_PROC_SUCCESS)
			break;
		node = list_first_entry(&t_info->main_queue,
					struct cpe_command_node, list);
		if (!node)
			break;
		list_del(&node->list);
		proc_rc = t_info->cpe_process_command(node);
		pr_debug("%s: process command return %d\n",
			 __func__, proc_rc);
		switch (proc_rc) {
		case CPE_PROC_SUCCESS:
			kfree(node);
			break;
		case CPE_PROC_FAILED:
			kfree(node);
			pr_err("%s: cmd failed\n", __func__);
			break;
		case CPE_PROC_KILLED:
			/*
			 * NOTE(review): node deliberately not freed here —
			 * presumably freed by the kill path; confirm.
			 */
			break;
		default:
			/* CPE_PROC_QUEUED: put the node back for later */
			list_add(&node->list, &(t_info->main_queue));
		}
	}
}
/*
 * cpe_worker_thread - kthread body that services the command queue
 * @context: the cpe_info this worker belongs to
 *
 * Sleeps on cmd_complete, drains the queue under msg_lock, then
 * re-arms the completion. Exits (still releasing msg_lock) once
 * stop_thread is observed, signalling thread_comp on the way out.
 */
static int cpe_worker_thread(void *context)
{
	struct cpe_info *t_info = (struct cpe_info *)context;

	/*
	 * Thread will run until requested to stop explicitly
	 * by setting the t_info->stop_thread flag
	 */
	while (1) {
		/* Wait for command to be processed */
		wait_for_completion(&t_info->cmd_complete);

		CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
		cpe_cmd_received(t_info);
		/* Re-arm so the next queued command wakes us again */
		reinit_completion(&t_info->cmd_complete);
		/* Check if thread needs to be stopped */
		if (t_info->stop_thread)
			goto unlock_and_exit;
		CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	};

unlock_and_exit:
	pr_debug("%s: thread stopped\n", __func__);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	complete_and_exit(&t_info->thread_comp, 0);
}
  351. static void cpe_create_worker_thread(struct cpe_info *t_info)
  352. {
  353. INIT_LIST_HEAD(&t_info->main_queue);
  354. init_completion(&t_info->cmd_complete);
  355. init_completion(&t_info->thread_comp);
  356. t_info->stop_thread = false;
  357. t_info->thread_handler = kthread_run(cpe_worker_thread,
  358. (void *)t_info, "cpe-worker-thread");
  359. pr_debug("%s: Created new worker thread\n",
  360. __func__);
  361. }
/*
 * cpe_cleanup_worker_thread - stop and reap the worker thread
 * @t_info: CPE instance whose worker should be stopped
 *
 * Sets stop_thread under msg_lock and completes cmd_complete so a
 * sleeping worker wakes, sees the flag and exits; then waits on
 * thread_comp for the worker's complete_and_exit().
 */
static void cpe_cleanup_worker_thread(struct cpe_info *t_info)
{
	if (!t_info->thread_handler) {
		pr_err("%s: thread not created\n", __func__);
		return;
	}

	/*
	 * Wake up the command handler in case
	 * it is waiting for an command to be processed.
	 */
	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
	t_info->stop_thread = true;
	complete(&t_info->cmd_complete);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");

	/* Wait for the thread to exit */
	wait_for_completion(&t_info->thread_comp);
	t_info->thread_handler = NULL;

	pr_debug("%s: Thread cleaned up successfully\n",
		 __func__);
}
  382. static enum cpe_svc_result
  383. cpe_send_cmd_to_thread(struct cpe_info *t_info,
  384. enum cpe_command command, void *data,
  385. bool high_prio)
  386. {
  387. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  388. struct cpe_command_node *cmd = NULL;
  389. rc = cpe_is_command_valid(t_info, command);
  390. if (rc != CPE_SVC_SUCCESS) {
  391. pr_err("%s: Invalid command %d\n",
  392. __func__, command);
  393. return rc;
  394. }
  395. cmd = kzalloc(sizeof(struct cpe_command_node),
  396. GFP_ATOMIC);
  397. if (!cmd)
  398. return CPE_SVC_NO_MEMORY;
  399. cmd->command = command;
  400. cmd->data = data;
  401. CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
  402. if (high_prio)
  403. list_add(&(cmd->list),
  404. &(t_info->main_queue));
  405. else
  406. list_add_tail(&(cmd->list),
  407. &(t_info->main_queue));
  408. complete(&t_info->cmd_complete);
  409. CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
  410. return rc;
  411. }
  412. static enum cpe_svc_result cpe_change_state(
  413. struct cpe_info *t_info,
  414. enum cpe_state state, enum cpe_substate ss)
  415. {
  416. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  417. if (!t_info)
  418. t_info = cpe_d.cpe_default_handle;
  419. t_info->state = state;
  420. t_info->substate = ss;
  421. pr_debug("%s: current state: %d,%d, new_state: %d,%d\n",
  422. __func__, t_info->state, t_info->substate,
  423. state, ss);
  424. return rc;
  425. }
  426. static enum cpe_svc_result
  427. cpe_is_command_valid(const struct cpe_info *t_info,
  428. enum cpe_command command)
  429. {
  430. enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
  431. if (t_info && t_info->cpe_cmd_validate)
  432. rc = t_info->cpe_cmd_validate(t_info, command);
  433. else
  434. pr_err("%s: invalid handle or callback\n",
  435. __func__);
  436. return rc;
  437. }
  438. static void cpe_notify_client(struct cpe_notif_node *client,
  439. struct cpe_svc_notification *payload)
  440. {
  441. if (!client || !payload) {
  442. pr_err("%s: invalid client or payload\n",
  443. __func__);
  444. return;
  445. }
  446. if (!(client->mask & payload->event)) {
  447. pr_debug("%s: client mask 0x%x not registered for event 0x%x\n",
  448. __func__, client->mask, payload->event);
  449. return;
  450. }
  451. if (client->notif.notification && !client->disabled)
  452. client->notif.notification(payload);
  453. if ((client->mask & CPE_SVC_CMI_MSG) &&
  454. client->notif.cmi_notification)
  455. client->notif.cmi_notification(
  456. (const struct cmi_api_notification *)payload);
  457. }
/*
 * cpe_broadcast_notification - fan a service event out to all clients
 * @t_info:  CPE instance whose client_list is walked
 * @payload: event to deliver; private_data is stamped with cdc_priv
 *
 * CMI-only clients (mask includes CPE_SVC_CMI_MSG) are skipped —
 * they receive their messages via cpe_notify_cmi_client() instead.
 * client_list is traversed under cpe_svc_lock.
 */
static void cpe_broadcast_notification(const struct cpe_info *t_info,
		struct cpe_svc_notification *payload)
{
	struct cpe_notif_node *n = NULL;

	if (!t_info || !payload) {
		pr_err("%s: invalid handle\n", __func__);
		return;
	}

	pr_debug("%s: notify clients, event = %d\n",
		 __func__, payload->event);
	payload->private_data = cpe_d.cdc_priv;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
	list_for_each_entry(n, &t_info->client_list, list) {
		if (!(n->mask & CPE_SVC_CMI_MSG))
			cpe_notify_client(n, payload);
	}
	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
}
  476. static void *cpe_register_generic(struct cpe_info *t_info,
  477. void notification_callback(
  478. const struct cpe_svc_notification *parameter),
  479. void cmi_callback(
  480. const struct cmi_api_notification *parameter),
  481. u32 mask, u32 service, const char *name)
  482. {
  483. struct cpe_notif_node *n = NULL;
  484. n = kzalloc(sizeof(struct cpe_notif_node),
  485. GFP_KERNEL);
  486. if (!n)
  487. return NULL;
  488. n->mask = mask;
  489. n->service = service;
  490. n->notif.notification = notification_callback;
  491. n->notif.cmi_notification = cmi_callback;
  492. n->context = t_info;
  493. n->disabled = false;
  494. n->name = name;
  495. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  496. /* Make sure CPE core service is first */
  497. if (service == CMI_CPE_CORE_SERVICE_ID)
  498. list_add(&n->list, &t_info->client_list);
  499. else
  500. list_add_tail(&n->list, &t_info->client_list);
  501. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  502. return n;
  503. }
  504. static enum cpe_svc_result cpe_deregister_generic(struct cpe_info *t_info,
  505. void *reg_handle)
  506. {
  507. struct cpe_notif_node *n = (struct cpe_notif_node *)reg_handle;
  508. if (!t_info || !reg_handle) {
  509. pr_err("%s: invalid handle\n", __func__);
  510. return CPE_SVC_INVALID_HANDLE;
  511. }
  512. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  513. list_del(&(n->list));
  514. kfree(reg_handle);
  515. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  516. return CPE_SVC_SUCCESS;
  517. }
  518. static enum cpe_svc_result cpe_svc_tgt_init(struct cpe_svc_codec_info_v1 *i,
  519. struct cpe_svc_tgt_abstraction *abs)
  520. {
  521. if (!i || !abs) {
  522. pr_err("%s: Incorrect information provided\n",
  523. __func__);
  524. return CPE_SVC_FAILED;
  525. }
  526. switch (i->id) {
  527. case CPE_SVC_CODEC_TOMTOM:
  528. return cpe_tgt_tomtom_init(i, abs);
  529. case CPE_SVC_CODEC_WCD9335:
  530. return cpe_tgt_wcd9335_init(i, abs);
  531. default:
  532. pr_err("%s: Codec type %d not supported\n",
  533. __func__, i->id);
  534. return CPE_SVC_FAILED;
  535. }
  536. return CPE_SVC_SUCCESS;
  537. }
/*
 * cpe_notify_cmi_client - route a CMI message to its service's client
 * @t_info:  instance whose client_list is searched
 * @payload: raw CMI message; the service id is read from its header
 * @result:  delivery status passed along in the notification
 *
 * Only the first matching client (CPE_SVC_CMI_MSG in mask, same
 * service id, CMI callback set) is notified; the loop then stops.
 */
static void cpe_notify_cmi_client(struct cpe_info *t_info, u8 *payload,
		enum cpe_svc_result result)
{
	struct cpe_notif_node *n = NULL;
	struct cmi_api_notification notif;
	struct cmi_hdr *hdr;
	u8 service = 0;

	if (!t_info || !payload) {
		pr_err("%s: invalid payload/handle\n",
			__func__);
		return;
	}

	hdr = CMI_GET_HEADER(payload);
	service = CMI_HDR_GET_SERVICE(hdr);

	notif.event = CMI_API_MSG;
	notif.result = result;
	notif.message = payload;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
	list_for_each_entry(n, &t_info->client_list, list) {
		if ((n->mask & CPE_SVC_CMI_MSG) &&
		    n->service == service &&
		    n->notif.cmi_notification) {
			n->notif.cmi_notification(&notif);
			break;
		}
	}
	CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
}
  566. static void cpe_toggle_irq_notification(struct cpe_info *t_info, u32 value)
  567. {
  568. if (cpe_d.cpe_irq_control_callback)
  569. cpe_d.cpe_irq_control_callback(value);
  570. }
/*
 * cpe_command_cleanup - release command-specific payload memory
 * @command_node: node whose data may need freeing
 *
 * Only the listed commands own heap-allocated data. NOTE(review):
 * the default branch logs an error for every other command, even
 * ones that legitimately carry no payload — confirm whether this
 * is ever reached for benign commands.
 */
static void cpe_command_cleanup(struct cpe_command_node *command_node)
{
	switch (command_node->command) {
	case CPE_CMD_SEND_MSG:
	case CPE_CMD_SEND_TRANS_MSG:
	case CPE_CMD_SEND_MSG_COMPLETE:
	case CPE_CMD_SHUTDOWN:
	case CPE_CMD_READ:
		kfree(command_node->data);
		command_node->data = NULL;
		break;
	default:
		pr_err("%s: unhandled command\n",
			__func__);
		break;
	}
}
/*
 * cpe_send_msg_to_inbox - build a CMI message in the staging inbox
 *                         buffer and push it to the CPE mailbox
 * @t_info: CPE instance (supplies target ops and staging buffers)
 * @opcode: core-service opcode, or anything else to send @msg
 * @msg:    outbound message; may be NULL for the fixed opcodes below
 *
 * The common CMI header (session, service, version, in-band OBM flag)
 * is always written first; the switch then fills opcode-specific
 * payload. For arbitrary messages: if msg->address is set, the payload
 * (and any OBM data segment) is written into CPE RAM and only a small
 * CMI_MSG_TRANSPORT descriptor goes through the inbox; otherwise the
 * whole payload is copied in-band.
 */
static enum cpe_svc_result cpe_send_msg_to_inbox(
		struct cpe_info *t_info, u32 opcode,
		struct cpe_send_msg *msg)
{
	size_t bytes = 0;
	size_t inbox_size =
		t_info->tgt->tgt_get_cpe_info()->inbox_size;
	struct cmi_hdr *hdr;
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	memset(t_info->tgt->inbox, 0, inbox_size);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	CMI_HDR_SET_SESSION(hdr, 1);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
	CMI_HDR_SET_OBM(hdr, CMI_OBM_FLAG_IN_BAND);

	switch (opcode) {
	case CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC: {
		/* Ask the CPE to allocate a shared message buffer */
		struct cmi_core_svc_cmd_shared_mem_alloc *m;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_core_svc_cmd_shared_mem_alloc));
		m = (struct cmi_core_svc_cmd_shared_mem_alloc *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		m->size = CPE_MSG_BUFFER_SIZE;
		pr_debug("send shared mem alloc msg to cpe inbox\n");
	}
		break;
	case CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ:
		/* Zero-payload request for DRAM access */
		CMI_HDR_SET_OPCODE(hdr,
			CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr, 0);
		pr_debug("%s: Creating DRAM acces request msg\n",
			 __func__);
		break;
	case CPE_CMI_BASIC_RSP_OPCODE: {
		/* Generic success acknowledgement back to the CPE */
		struct cmi_basic_rsp_result *rsp;

		CMI_HDR_SET_OPCODE(hdr,
			CPE_CMI_BASIC_RSP_OPCODE);
		CMI_HDR_SET_PAYLOAD_SIZE(hdr,
			sizeof(struct cmi_basic_rsp_result));
		rsp = (struct cmi_basic_rsp_result *)
			CMI_GET_PAYLOAD(t_info->tgt->inbox);
		rsp->status = 0;
		pr_debug("%s: send basic response\n", __func__);
	}
		break;
	default:
		/* NOTE(review): msg is dereferenced here — callers must
		 * never pass NULL with a non-fixed opcode. */
		if (msg->address != 0) {
			struct cmi_msg_transport *m = NULL;
			struct cpe_svc_mem_segment mem_seg;

			mem_seg.type = CPE_SVC_DATA_MEM;
			if (msg->isobm) {
				/* Stage the out-of-band segment first */
				struct cmi_obm *obm = (struct cmi_obm *)
					CMI_GET_PAYLOAD(msg->payload);
				mem_seg.cpe_addr = obm->mem_handle;
				mem_seg.data = (u8 *)obm->data_ptr.kvaddr;
				mem_seg.size = obm->size;
				t_info->tgt->tgt_write_ram(t_info, &mem_seg);
			}

			/* Write the message body into CPE shared memory */
			mem_seg.cpe_addr = msg->address;
			mem_seg.data = msg->payload;
			mem_seg.size = msg->size;
			t_info->tgt->tgt_write_ram(t_info, &mem_seg);

			/* Inbox only carries the transport descriptor */
			hdr = CMI_GET_HEADER(t_info->tgt->inbox);
			CMI_HDR_SET_OPCODE(hdr, CMI_MSG_TRANSPORT);
			m = (struct cmi_msg_transport *)
				CMI_GET_PAYLOAD(t_info->tgt->inbox);
			m->addr = msg->address;
			m->size = msg->size;
			CMI_HDR_SET_PAYLOAD_SIZE(hdr,
				sizeof(struct cmi_msg_transport));
		} else {
			/* In-band: the full payload fits in the inbox */
			memcpy(t_info->tgt->inbox, msg->payload,
			       msg->size);
		}
		break;
	}

	pr_debug("%s: sending message to cpe inbox\n",
		 __func__);
	bytes = sizeof(struct cmi_hdr);
	hdr = CMI_GET_HEADER(t_info->tgt->inbox);
	bytes += CMI_HDR_GET_PAYLOAD_SIZE(hdr);
	rc = t_info->tgt->tgt_write_mailbox(t_info->tgt->inbox, bytes);

	return rc;
}
  674. static bool cpe_is_cmd_clk_req(void *cmd)
  675. {
  676. struct cmi_hdr *hdr;
  677. hdr = CMI_GET_HEADER(cmd);
  678. if ((CMI_HDR_GET_SERVICE(hdr) ==
  679. CMI_CPE_CORE_SERVICE_ID)) {
  680. if (CMI_GET_OPCODE(cmd) ==
  681. CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST)
  682. return true;
  683. }
  684. return false;
  685. }
  686. static enum cpe_svc_result cpe_process_clk_change_req(
  687. struct cpe_info *t_info)
  688. {
  689. struct cmi_core_svc_cmd_clk_freq_request *req;
  690. req = (struct cmi_core_svc_cmd_clk_freq_request *)
  691. CMI_GET_PAYLOAD(t_info->tgt->outbox);
  692. if (!cpe_d.cpe_change_freq_plan_cb) {
  693. pr_err("%s: No support for clk freq change\n",
  694. __func__);
  695. return CPE_SVC_FAILED;
  696. }
  697. cpe_d.cpe_change_freq_plan_cb(cpe_d.cdc_priv,
  698. req->clk_freq);
  699. /*send a basic response*/
  700. cpe_send_msg_to_inbox(t_info,
  701. CPE_CMI_BASIC_RSP_OPCODE, NULL);
  702. return CPE_SVC_SUCCESS;
  703. }
  704. static void cpe_process_irq_int(u32 irq,
  705. struct cpe_info *t_info)
  706. {
  707. struct cpe_command_node temp_node;
  708. struct cpe_send_msg *m;
  709. u8 size = 0;
  710. bool err_irq = false;
  711. struct cmi_hdr *hdr;
  712. pr_debug("%s: irq = %u\n", __func__, irq);
  713. if (!t_info) {
  714. pr_err("%s: Invalid handle\n",
  715. __func__);
  716. return;
  717. }
  718. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  719. switch (irq) {
  720. case CPE_IRQ_OUTBOX_IRQ:
  721. size = t_info->tgt->tgt_get_cpe_info()->outbox_size;
  722. t_info->tgt->tgt_read_mailbox(t_info->tgt->outbox, size);
  723. break;
  724. case CPE_IRQ_MEM_ACCESS_ERROR:
  725. err_irq = true;
  726. cpe_change_state(t_info, CPE_STATE_OFFLINE, CPE_SS_IDLE);
  727. break;
  728. case CPE_IRQ_WDOG_BITE:
  729. case CPE_IRQ_RCO_WDOG_INT:
  730. err_irq = true;
  731. __cpe_svc_shutdown(t_info);
  732. break;
  733. case CPE_IRQ_FLL_LOCK_LOST:
  734. default:
  735. err_irq = true;
  736. break;
  737. }
  738. if (err_irq) {
  739. pr_err("%s: CPE error IRQ %u occurred\n",
  740. __func__, irq);
  741. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  742. return;
  743. }
  744. switch (t_info->state) {
  745. case CPE_STATE_BOOTING:
  746. switch (t_info->substate) {
  747. case CPE_SS_BOOT:
  748. temp_node.command = CPE_CMD_BOOT_INITIALIZE;
  749. temp_node.result = CPE_SVC_SUCCESS;
  750. t_info->substate = CPE_SS_BOOT_INIT;
  751. t_info->cpe_process_command(&temp_node);
  752. break;
  753. case CPE_SS_BOOT_INIT:
  754. temp_node.command = CPE_CMD_BOOT_COMPLETE;
  755. temp_node.result = CPE_SVC_SUCCESS;
  756. t_info->substate = CPE_SS_ONLINE;
  757. t_info->cpe_process_command(&temp_node);
  758. break;
  759. default:
  760. pr_debug("%s: unhandled substate %d for state %d\n",
  761. __func__, t_info->state, t_info->substate);
  762. break;
  763. }
  764. break;
  765. case CPE_STATE_SENDING_MSG:
  766. hdr = CMI_GET_HEADER(t_info->tgt->outbox);
  767. if (CMI_GET_OPCODE(t_info->tgt->outbox) ==
  768. CPE_LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
  769. pr_debug("%s: session_id: %u, state: %d,%d, event received\n",
  770. __func__, CMI_HDR_GET_SESSION_ID(hdr),
  771. t_info->state, t_info->substate);
  772. temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
  773. temp_node.data = NULL;
  774. t_info->cpe_process_command(&temp_node);
  775. break;
  776. }
  777. m = (struct cpe_send_msg *)t_info->pending;
  778. switch (t_info->substate) {
  779. case CPE_SS_MSG_REQUEST_ACCESS:
  780. cpe_send_cmd_to_thread(t_info,
  781. CPE_CMD_SEND_TRANS_MSG, m, true);
  782. break;
  783. case CPE_SS_MSG_SEND_INBOX:
  784. if (cpe_is_cmd_clk_req(t_info->tgt->outbox))
  785. cpe_process_clk_change_req(t_info);
  786. else
  787. cpe_send_cmd_to_thread(t_info,
  788. CPE_CMD_SEND_MSG_COMPLETE, m, true);
  789. break;
  790. default:
  791. pr_debug("%s: unhandled substate %d for state %d\n",
  792. __func__, t_info->state, t_info->substate);
  793. break;
  794. }
  795. break;
  796. case CPE_STATE_IDLE:
  797. pr_debug("%s: Message received, notifying client\n",
  798. __func__);
  799. temp_node.command = CPE_CMD_PROC_INCOMING_MSG;
  800. temp_node.data = NULL;
  801. t_info->cpe_process_command(&temp_node);
  802. break;
  803. default:
  804. pr_debug("%s: unhandled state %d\n",
  805. __func__, t_info->state);
  806. break;
  807. }
  808. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  809. }
  810. static void broacast_boot_failed(void)
  811. {
  812. struct cpe_info *t_info = cpe_d.cpe_default_handle;
  813. struct cpe_svc_notification payload;
  814. payload.event = CPE_SVC_BOOT_FAILED;
  815. payload.result = CPE_SVC_FAILED;
  816. payload.payload = NULL;
  817. if (t_info)
  818. payload.private_data =
  819. t_info->client_context;
  820. cpe_broadcast_notification(t_info, &payload);
  821. }
  822. static enum cpe_svc_result broadcast_boot_event(
  823. struct cpe_info *t_info)
  824. {
  825. struct cpe_svc_notification payload;
  826. payload.event = CPE_SVC_ONLINE;
  827. payload.result = CPE_SVC_SUCCESS;
  828. payload.payload = NULL;
  829. if (t_info)
  830. payload.private_data =
  831. t_info->client_context;
  832. cpe_broadcast_notification(t_info, &payload);
  833. return CPE_SVC_SUCCESS;
  834. }
  835. static enum cpe_process_result cpe_boot_initialize(struct cpe_info *t_info,
  836. enum cpe_svc_result *cpe_rc)
  837. {
  838. enum cpe_process_result rc = CPE_SVC_FAILED;
  839. struct cpe_svc_notification payload;
  840. struct cmi_core_svc_event_system_boot *p = NULL;
  841. if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
  842. CPE_CORE_SVC_EVENT_SYSTEM_BOOT) {
  843. broacast_boot_failed();
  844. return rc;
  845. }
  846. p = (struct cmi_core_svc_event_system_boot *)
  847. CMI_GET_PAYLOAD(t_info->tgt->outbox);
  848. if (p->status != CPE_BOOT_SUCCESS) {
  849. pr_err("%s: cpe boot failed, status = %d\n",
  850. __func__, p->status);
  851. broacast_boot_failed();
  852. return rc;
  853. }
  854. /* boot was successful */
  855. if (p->version ==
  856. CPE_CORE_VERSION_SYSTEM_BOOT_EVENT) {
  857. cpe_d.cpe_debug_vector.debug_address =
  858. p->sfr_buff_address;
  859. cpe_d.cpe_debug_vector.debug_buffer_size =
  860. p->sfr_buff_size;
  861. cpe_d.cpe_debug_vector.status = p->status;
  862. payload.event = CPE_SVC_BOOT;
  863. payload.result = CPE_SVC_SUCCESS;
  864. payload.payload = (void *)&cpe_d.cpe_debug_vector;
  865. payload.private_data = t_info->client_context;
  866. cpe_broadcast_notification(t_info, &payload);
  867. }
  868. cpe_change_state(t_info, CPE_STATE_BOOTING,
  869. CPE_SS_BOOT_INIT);
  870. (*cpe_rc) = cpe_send_msg_to_inbox(t_info,
  871. CPE_CORE_SVC_CMD_SHARED_MEM_ALLOC, NULL);
  872. rc = CPE_PROC_SUCCESS;
  873. return rc;
  874. }
  875. static void cpe_svc_core_cmi_handler(
  876. const struct cmi_api_notification *parameter)
  877. {
  878. struct cmi_hdr *hdr;
  879. if (!parameter)
  880. return;
  881. pr_debug("%s: event = %d\n",
  882. __func__, parameter->event);
  883. if (parameter->event != CMI_API_MSG)
  884. return;
  885. hdr = (struct cmi_hdr *) parameter->message;
  886. if (hdr->opcode == CPE_CMI_BASIC_RSP_OPCODE) {
  887. struct cmi_basic_rsp_result *result;
  888. result = (struct cmi_basic_rsp_result *)
  889. ((u8 *)parameter->message) + (sizeof(*hdr));
  890. if (result->status)
  891. pr_err("%s: error response, error code = %u\n",
  892. __func__, result->status);
  893. complete(&cpe_d.cpe_default_handle->core_svc_cmd_compl);
  894. }
  895. }
/*
 * cpe_clk_plan_work - worker that sends the clock frequency plan to CPE
 * @work: embedded work_struct inside struct cpe_info (clk_plan_work)
 *
 * Queries the codec driver for its supported clock plans, packages them
 * into a CMI message for the core service, sends it and waits up to 10s
 * for the basic response (completed by cpe_svc_core_cmi_handler) before
 * issuing the start notification. Scheduled from cpe_boot_complete()
 * for non-TOMTOM codecs.
 */
static void cpe_clk_plan_work(struct work_struct *work)
{
	struct cpe_info *t_info = NULL;
	size_t size = 0;
	struct cpe_svc_cfg_clk_plan plan;
	u8 *cmi_msg;
	struct cmi_hdr *hdr;
	int rc;

	/* defensive: container_of can only yield NULL for a zero offset */
	t_info = container_of(work, struct cpe_info, clk_plan_work);
	if (!t_info) {
		pr_err("%s: Invalid handle for cpe_info\n",
			__func__);
		return;
	}

	/* Register the core service */
	/* NOTE(review): cpe_cmi_handle is not checked for NULL here —
	 * confirm cmi_register() cannot fail in this path */
	cpe_d.cpe_cmi_handle = cmi_register(
			cpe_svc_core_cmi_handler,
			CMI_CPE_CORE_SERVICE_ID);

	/* send the clk plan command */
	if (!cpe_d.cpe_query_freq_plans_cb) {
		pr_err("%s: No support for querying clk plans\n",
			__func__);
		return;
	}

	cpe_d.cpe_query_freq_plans_cb(cpe_d.cdc_priv, &plan);
	/* payload = current freq + count + one entry per reported plan */
	size = sizeof(plan.current_clk_feq) +
		sizeof(plan.num_clk_freqs);
	size += plan.num_clk_freqs *
		sizeof(plan.clk_freqs[0]);
	cmi_msg = kzalloc(size + sizeof(struct cmi_hdr),
			  GFP_KERNEL);
	if (!cmi_msg)
		return;

	hdr = (struct cmi_hdr *) cmi_msg;
	CMI_HDR_SET_OPCODE(hdr,
			   CPE_CORE_SVC_CMD_CFG_CLK_PLAN);
	CMI_HDR_SET_SERVICE(hdr, CMI_CPE_CORE_SERVICE_ID);
	CMI_HDR_SET_SESSION(hdr, 1);
	CMI_HDR_SET_VERSION(hdr, CMI_DRIVER_SUPPORTED_VERSION);
	CMI_HDR_SET_PAYLOAD_SIZE(hdr, size);
	/* assumes struct cpe_svc_cfg_clk_plan matches the wire layout
	 * (no padding before clk_freqs) — TODO confirm */
	memcpy(CMI_GET_PAYLOAD(cmi_msg), &plan,
	       size);
	cmi_send_msg(cmi_msg);

	/* Wait for clk plan command to complete */
	rc = wait_for_completion_timeout(&t_info->core_svc_cmd_compl,
					 (10 * HZ));
	if (!rc) {
		pr_err("%s: clk plan cmd timed out\n",
			__func__);
		goto cmd_fail;
	}

	/* clk plan cmd is successful, send start notification */
	if (t_info->cpe_start_notification)
		t_info->cpe_start_notification(t_info);
	else
		pr_err("%s: no start notification\n",
			__func__);

cmd_fail:
	kfree(cmi_msg);
	cmi_deregister(cpe_d.cpe_cmi_handle);
}
  957. static enum cpe_process_result cpe_boot_complete(
  958. struct cpe_info *t_info)
  959. {
  960. struct cmi_core_svc_cmdrsp_shared_mem_alloc *p = NULL;
  961. if (CMI_GET_OPCODE(t_info->tgt->outbox) !=
  962. CPE_CORE_SVC_CMDRSP_SHARED_MEM_ALLOC) {
  963. broacast_boot_failed();
  964. return CPE_PROC_FAILED;
  965. }
  966. p = (struct cmi_core_svc_cmdrsp_shared_mem_alloc *)
  967. CMI_GET_PAYLOAD(t_info->tgt->outbox);
  968. cpe_d.cpe_msg_buffer = p->addr;
  969. if (cpe_d.cpe_msg_buffer == 0) {
  970. pr_err("%s: Invalid cpe buffer for message\n",
  971. __func__);
  972. broacast_boot_failed();
  973. return CPE_PROC_FAILED;
  974. }
  975. cpe_change_state(t_info, CPE_STATE_IDLE, CPE_SS_IDLE);
  976. cpe_create_worker_thread(t_info);
  977. if (t_info->codec_id != CPE_SVC_CODEC_TOMTOM) {
  978. schedule_work(&t_info->clk_plan_work);
  979. } else {
  980. if (t_info->cpe_start_notification)
  981. t_info->cpe_start_notification(t_info);
  982. else
  983. pr_err("%s: no start notification\n",
  984. __func__);
  985. }
  986. pr_debug("%s: boot complete\n", __func__);
  987. return CPE_PROC_SUCCESS;
  988. }
  989. static enum cpe_process_result cpe_process_send_msg(
  990. struct cpe_info *t_info,
  991. enum cpe_svc_result *cpe_rc,
  992. struct cpe_command_node *command_node)
  993. {
  994. enum cpe_process_result rc = CPE_PROC_SUCCESS;
  995. struct cpe_send_msg *m =
  996. (struct cpe_send_msg *)command_node->data;
  997. u32 size = m->size;
  998. if (t_info->pending) {
  999. pr_debug("%s: message queued\n", __func__);
  1000. *cpe_rc = CPE_SVC_SUCCESS;
  1001. return CPE_PROC_QUEUED;
  1002. }
  1003. pr_debug("%s: Send CMI message, size = %u\n",
  1004. __func__, size);
  1005. if (size <= t_info->tgt->tgt_get_cpe_info()->inbox_size) {
  1006. pr_debug("%s: Msg fits mailbox, size %u\n",
  1007. __func__, size);
  1008. cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
  1009. CPE_SS_MSG_SEND_INBOX);
  1010. t_info->pending = m;
  1011. *cpe_rc = cpe_send_msg_to_inbox(t_info, 0, m);
  1012. } else if (size < CPE_MSG_BUFFER_SIZE) {
  1013. m->address = cpe_d.cpe_msg_buffer;
  1014. pr_debug("%s: Message req CMI mem access\n",
  1015. __func__);
  1016. t_info->pending = m;
  1017. cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
  1018. CPE_SS_MSG_REQUEST_ACCESS);
  1019. *cpe_rc = cpe_send_msg_to_inbox(t_info,
  1020. CPE_CORE_SVC_CMD_DRAM_ACCESS_REQ, m);
  1021. } else {
  1022. pr_debug("%s: Invalid msg size %u\n",
  1023. __func__, size);
  1024. cpe_command_cleanup(command_node);
  1025. rc = CPE_PROC_FAILED;
  1026. cpe_change_state(t_info, CPE_STATE_IDLE,
  1027. CPE_SS_IDLE);
  1028. }
  1029. return rc;
  1030. }
  1031. static enum cpe_process_result cpe_process_incoming(
  1032. struct cpe_info *t_info)
  1033. {
  1034. enum cpe_process_result rc = CPE_PROC_FAILED;
  1035. struct cmi_hdr *hdr;
  1036. hdr = CMI_GET_HEADER(t_info->tgt->outbox);
  1037. if (CMI_HDR_GET_SERVICE(hdr) ==
  1038. CMI_CPE_CORE_SERVICE_ID) {
  1039. pr_debug("%s: core service message received\n",
  1040. __func__);
  1041. switch (CMI_GET_OPCODE(t_info->tgt->outbox)) {
  1042. case CPE_CORE_SVC_CMD_CLK_FREQ_REQUEST:
  1043. cpe_process_clk_change_req(t_info);
  1044. rc = CPE_PROC_SUCCESS;
  1045. break;
  1046. case CMI_MSG_TRANSPORT:
  1047. pr_debug("%s: transport msg received\n",
  1048. __func__);
  1049. rc = CPE_PROC_SUCCESS;
  1050. break;
  1051. case CPE_CMI_BASIC_RSP_OPCODE:
  1052. pr_debug("%s: received basic rsp\n",
  1053. __func__);
  1054. rc = CPE_PROC_SUCCESS;
  1055. break;
  1056. default:
  1057. pr_debug("%s: unknown message received\n",
  1058. __func__);
  1059. break;
  1060. }
  1061. } else {
  1062. /* if service id if for a CMI client, notify client */
  1063. pr_debug("%s: Message received, notifying client\n",
  1064. __func__);
  1065. cpe_notify_cmi_client(t_info,
  1066. t_info->tgt->outbox, CPE_SVC_SUCCESS);
  1067. rc = CPE_PROC_SUCCESS;
  1068. }
  1069. return rc;
  1070. }
  1071. static enum cpe_process_result cpe_process_kill_thread(
  1072. struct cpe_info *t_info,
  1073. struct cpe_command_node *command_node)
  1074. {
  1075. struct cpe_svc_notification payload;
  1076. cpe_d.cpe_msg_buffer = 0;
  1077. payload.result = CPE_SVC_SHUTTING_DOWN;
  1078. payload.event = CPE_SVC_OFFLINE;
  1079. payload.payload = NULL;
  1080. payload.private_data = t_info->client_context;
  1081. /*
  1082. * Make state as offline before broadcasting
  1083. * the message to clients.
  1084. */
  1085. cpe_change_state(t_info, CPE_STATE_OFFLINE,
  1086. CPE_SS_IDLE);
  1087. cpe_broadcast_notification(t_info, &payload);
  1088. return CPE_PROC_KILLED;
  1089. }
/*
 * cpe_mt_process_cmd - main dispatcher for CPE service commands
 * @command_node: command to execute, with optional data payload
 *
 * Installed as t_info->cpe_process_command. Validates the command
 * against the current state, dispatches it, and on any failure
 * (cpe_rc != CPE_SVC_SUCCESS) performs common cleanup: the pending
 * message's client is notified with CPE_SVC_FAILED, the command node
 * is cleaned up and the service returns to IDLE.
 */
static enum cpe_process_result cpe_mt_process_cmd(
		struct cpe_command_node *command_node)
{
	struct cpe_info *t_info = cpe_d.cpe_default_handle;
	enum cpe_svc_result cpe_rc = CPE_SVC_SUCCESS;
	enum cpe_process_result rc = CPE_PROC_SUCCESS;
	struct cpe_send_msg *m;
	struct cmi_hdr *hdr;
	/* NOTE(review): 'service' is assigned but never used below */
	u8 service = 0;
	u8 retries = 0;

	if (!t_info || !command_node) {
		pr_err("%s: Invalid handle/command node\n",
			__func__);
		return CPE_PROC_FAILED;
	}

	pr_debug("%s: cmd = %u\n", __func__, command_node->command);

	cpe_rc = cpe_is_command_valid(t_info, command_node->command);
	if (cpe_rc != CPE_SVC_SUCCESS) {
		pr_err("%s: Invalid command %d, err = %d\n",
			__func__, command_node->command, cpe_rc);
		return CPE_PROC_FAILED;
	}

	switch (command_node->command) {
	case CPE_CMD_BOOT_INITIALIZE:
		rc = cpe_boot_initialize(t_info, &cpe_rc);
		break;

	case CPE_CMD_BOOT_COMPLETE:
		rc = cpe_boot_complete(t_info);
		break;

	case CPE_CMD_SEND_MSG:
		rc = cpe_process_send_msg(t_info, &cpe_rc,
					  command_node);
		break;

	case CPE_CMD_SEND_TRANS_MSG:
		m = (struct cpe_send_msg *)command_node->data;

		/* Poll until CPE is inactive or retries are exhausted;
		 * the message is sent either way */
		while (retries < CPE_SVC_INACTIVE_STATE_RETRIES_MAX) {
			if (t_info->tgt->tgt_is_active()) {
				++retries;
				/* Wait for CPE to be inactive */
				usleep_range(5000, 5100);
			} else {
				break;
			}
		}

		pr_debug("%s: cpe inactive after %d attempts\n",
			 __func__, retries);

		cpe_change_state(t_info, CPE_STATE_SENDING_MSG,
				 CPE_SS_MSG_SEND_INBOX);
		/* NOTE(review): cpe_send_msg_to_inbox returns a
		 * cpe_svc_result; implicit conversion to
		 * cpe_process_result here — confirm intended */
		rc = cpe_send_msg_to_inbox(t_info, 0, m);
		break;

	case CPE_CMD_SEND_MSG_COMPLETE:
		hdr = CMI_GET_HEADER(t_info->tgt->outbox);
		service = CMI_HDR_GET_SERVICE(hdr);
		pr_debug("%s: msg send success, notifying clients\n",
			 __func__);
		/* send finished: clear pending, go idle, notify owner */
		cpe_command_cleanup(command_node);
		t_info->pending = NULL;
		cpe_change_state(t_info,
				 CPE_STATE_IDLE, CPE_SS_IDLE);
		cpe_notify_cmi_client(t_info,
			t_info->tgt->outbox, CPE_SVC_SUCCESS);
		break;

	case CPE_CMD_PROC_INCOMING_MSG:
		rc = cpe_process_incoming(t_info);
		break;

	case CPE_CMD_KILL_THREAD:
		rc = cpe_process_kill_thread(t_info, command_node);
		break;

	default:
		pr_err("%s: unhandled cpe cmd = %d\n",
			__func__, command_node->command);
		break;
	}

	if (cpe_rc != CPE_SVC_SUCCESS) {
		/* common failure path: fail pending message, clean up,
		 * fall back to IDLE */
		pr_err("%s: failed to execute command\n", __func__);
		if (t_info->pending) {
			m = (struct cpe_send_msg *)t_info->pending;
			cpe_notify_cmi_client(t_info, m->payload,
					      CPE_SVC_FAILED);
			t_info->pending = NULL;
		}

		cpe_command_cleanup(command_node);
		rc = CPE_PROC_FAILED;
		cpe_change_state(t_info, CPE_STATE_IDLE,
				 CPE_SS_IDLE);
	}

	return rc;
}
/*
 * cpe_mt_validate_cmd - check whether a command is legal in current state
 * @t_info: CPE service instance (may be NULL)
 * @command: command to validate
 *
 * Installed as t_info->cpe_cmd_validate. Implements the state x command
 * validation matrix: returns CPE_SVC_SUCCESS when allowed, CPE_SVC_BUSY
 * when an FTM test must wait for an ongoing operation, and
 * CPE_SVC_NOT_READY / CPE_SVC_FAILED otherwise.
 */
static enum cpe_svc_result cpe_mt_validate_cmd(
		const struct cpe_info *t_info,
		enum cpe_command command)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	if ((t_info == NULL) || t_info->initialized == false) {
		pr_err("%s: cpe service is not ready\n",
			__func__);
		return CPE_SVC_NOT_READY;
	}

	switch (t_info->state) {
	case CPE_STATE_UNINITIALIZED:
	case CPE_STATE_INITIALIZED:
		/* before/after firmware download: setup and cleanup only */
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_DL_SEGMENT:
		case CPE_CMD_RAMDUMP:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_DEINITIALIZE:
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	case CPE_STATE_DOWNLOADING:
		/* image transfer in progress */
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_DL_SEGMENT:
		case CPE_CMD_BOOT:
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	case CPE_STATE_BOOTING:
		/* boot handshake in progress; FTM must wait */
		switch (command) {
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_BOOT_INITIALIZE:
		case CPE_CMD_BOOT_COMPLETE:
		case CPE_CMD_SHUTDOWN:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	case CPE_STATE_IDLE:
		/* fully online and quiescent */
		switch (command) {
		case CPE_CMD_SEND_MSG:
		case CPE_CMD_SEND_TRANS_MSG:
		case CPE_CMD_SEND_MSG_COMPLETE:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_RESET:
		case CPE_CMD_SHUTDOWN:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_PROC_INCOMING_MSG:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_FAILED;
			break;
		}
		break;

	case CPE_STATE_SENDING_MSG:
		/* message in flight; no reset allowed here */
		switch (command) {
		case CPE_CMD_SEND_MSG:
		case CPE_CMD_SEND_TRANS_MSG:
		case CPE_CMD_SEND_MSG_COMPLETE:
		case CPE_CMD_PROCESS_IRQ:
		case CPE_CMD_SHUTDOWN:
		case CPE_CMD_KILL_THREAD:
		case CPE_CMD_PROC_INCOMING_MSG:
			rc = CPE_SVC_SUCCESS;
			break;
		case CPE_CMD_FTM_TEST:
			rc = CPE_SVC_BUSY;
			break;
		default:
			rc = CPE_SVC_FAILED;
			break;
		}
		break;

	case CPE_STATE_OFFLINE:
		/* only recovery operations */
		switch (command) {
		case CPE_CMD_RESET:
		case CPE_CMD_RAMDUMP:
		case CPE_CMD_KILL_THREAD:
			rc = CPE_SVC_SUCCESS;
			break;
		default:
			rc = CPE_SVC_NOT_READY;
			break;
		}
		break;

	default:
		pr_debug("%s: unhandled state %d\n",
			__func__, t_info->state);
		break;
	}

	if (rc != CPE_SVC_SUCCESS)
		pr_err("%s: invalid command %d, state = %d\n",
			__func__, command, t_info->state);
	return rc;
}
  1296. void *cpe_svc_initialize(
  1297. void irq_control_callback(u32 enable),
  1298. const void *codec_info, void *context)
  1299. {
  1300. struct cpe_info *t_info = NULL;
  1301. const struct cpe_svc_hw_cfg *cap = NULL;
  1302. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1303. struct cpe_svc_init_param *init_context =
  1304. (struct cpe_svc_init_param *) context;
  1305. void *client_context = NULL;
  1306. if (cpe_d.cpe_default_handle &&
  1307. cpe_d.cpe_default_handle->initialized == true)
  1308. return (void *)cpe_d.cpe_default_handle;
  1309. cpe_d.cpe_query_freq_plans_cb = NULL;
  1310. cpe_d.cpe_change_freq_plan_cb = NULL;
  1311. if (context) {
  1312. client_context = init_context->context;
  1313. switch (init_context->version) {
  1314. case CPE_SVC_INIT_PARAM_V1:
  1315. cpe_d.cpe_query_freq_plans_cb =
  1316. init_context->query_freq_plans_cb;
  1317. cpe_d.cpe_change_freq_plan_cb =
  1318. init_context->change_freq_plan_cb;
  1319. break;
  1320. default:
  1321. break;
  1322. }
  1323. }
  1324. if (!cpe_d.cpe_default_handle) {
  1325. cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
  1326. GFP_KERNEL);
  1327. if (!cpe_d.cpe_default_handle)
  1328. goto err_register;
  1329. memset(cpe_d.cpe_default_handle, 0,
  1330. sizeof(struct cpe_info));
  1331. }
  1332. t_info = cpe_d.cpe_default_handle;
  1333. t_info->client_context = client_context;
  1334. INIT_LIST_HEAD(&t_info->client_list);
  1335. cpe_d.cdc_priv = client_context;
  1336. INIT_WORK(&t_info->clk_plan_work, cpe_clk_plan_work);
  1337. init_completion(&t_info->core_svc_cmd_compl);
  1338. t_info->tgt = kzalloc(sizeof(struct cpe_svc_tgt_abstraction),
  1339. GFP_KERNEL);
  1340. if (!t_info->tgt)
  1341. goto err_tgt_alloc;
  1342. t_info->codec_id =
  1343. ((struct cpe_svc_codec_info_v1 *) codec_info)->id;
  1344. rc = cpe_svc_tgt_init((struct cpe_svc_codec_info_v1 *)codec_info,
  1345. t_info->tgt);
  1346. if (rc != CPE_SVC_SUCCESS)
  1347. goto err_tgt_init;
  1348. cap = t_info->tgt->tgt_get_cpe_info();
  1349. memset(t_info->tgt->outbox, 0, cap->outbox_size);
  1350. memset(t_info->tgt->inbox, 0, cap->inbox_size);
  1351. mutex_init(&t_info->msg_lock);
  1352. cpe_d.cpe_irq_control_callback = irq_control_callback;
  1353. t_info->cpe_process_command = cpe_mt_process_cmd;
  1354. t_info->cpe_cmd_validate = cpe_mt_validate_cmd;
  1355. t_info->cpe_start_notification = broadcast_boot_event;
  1356. mutex_init(&cpe_d.cpe_api_mutex);
  1357. mutex_init(&cpe_d.cpe_svc_lock);
  1358. pr_debug("%s: cpe services initialized\n", __func__);
  1359. t_info->state = CPE_STATE_INITIALIZED;
  1360. t_info->initialized = true;
  1361. return t_info;
  1362. err_tgt_init:
  1363. kfree(t_info->tgt);
  1364. err_tgt_alloc:
  1365. kfree(cpe_d.cpe_default_handle);
  1366. cpe_d.cpe_default_handle = NULL;
  1367. err_register:
  1368. return NULL;
  1369. }
/*
 * cpe_svc_deinitialize - tear down a CPE service instance
 * @cpe_handle: instance to destroy (NULL selects the default handle)
 *
 * Validates that deinit is allowed in the current state, detaches the
 * default handle, deinitializes the target abstraction and frees the
 * instance and its target structure.
 *
 * NOTE(review): unlike most cpe_svc_* entry points this does not take
 * cpe_api_mutex — and it destroys that mutex at the end. Confirm that
 * callers guarantee mutual exclusion with all other API calls.
 */
enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;

	if (!t_info)
		t_info = cpe_d.cpe_default_handle;

	/* NULL t_info is rejected by the validator (NOT_READY) */
	rc = cpe_is_command_valid(t_info, CPE_CMD_DEINITIALIZE);
	if (rc != CPE_SVC_SUCCESS) {
		pr_err("%s: Invalid command %d\n",
			__func__, CPE_CMD_DEINITIALIZE);
		return rc;
	}

	if (cpe_d.cpe_default_handle == t_info)
		cpe_d.cpe_default_handle = NULL;

	t_info->tgt->tgt_deinit(t_info->tgt);
	cpe_change_state(t_info, CPE_STATE_UNINITIALIZED,
			 CPE_SS_IDLE);
	mutex_destroy(&t_info->msg_lock);
	kfree(t_info->tgt);
	kfree(t_info);
	mutex_destroy(&cpe_d.cpe_api_mutex);
	mutex_destroy(&cpe_d.cpe_svc_lock);

	return rc;
}
  1394. void *cpe_svc_register(void *cpe_handle,
  1395. void (*notification_callback)
  1396. (const struct cpe_svc_notification *parameter),
  1397. u32 mask, const char *name)
  1398. {
  1399. void *reg_handle;
  1400. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1401. if (!cpe_d.cpe_default_handle) {
  1402. cpe_d.cpe_default_handle = kzalloc(sizeof(struct cpe_info),
  1403. GFP_KERNEL);
  1404. if (!cpe_d.cpe_default_handle) {
  1405. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1406. return NULL;
  1407. }
  1408. memset(cpe_d.cpe_default_handle, 0,
  1409. sizeof(struct cpe_info));
  1410. }
  1411. if (!cpe_handle)
  1412. cpe_handle = cpe_d.cpe_default_handle;
  1413. reg_handle = cpe_register_generic((struct cpe_info *)cpe_handle,
  1414. notification_callback,
  1415. NULL,
  1416. mask, CPE_NO_SERVICE, name);
  1417. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1418. return reg_handle;
  1419. }
  1420. enum cpe_svc_result cpe_svc_deregister(void *cpe_handle, void *reg_handle)
  1421. {
  1422. enum cpe_svc_result rc;
  1423. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1424. if (!cpe_handle)
  1425. cpe_handle = cpe_d.cpe_default_handle;
  1426. rc = cpe_deregister_generic((struct cpe_info *)cpe_handle,
  1427. reg_handle);
  1428. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1429. return rc;
  1430. }
  1431. enum cpe_svc_result cpe_svc_download_segment(void *cpe_handle,
  1432. const struct cpe_svc_mem_segment *segment)
  1433. {
  1434. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1435. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1436. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1437. if (!t_info)
  1438. t_info = cpe_d.cpe_default_handle;
  1439. rc = cpe_is_command_valid(t_info, CPE_CMD_DL_SEGMENT);
  1440. if (rc != CPE_SVC_SUCCESS) {
  1441. pr_err("%s: cmd validation fail, cmd = %d\n",
  1442. __func__, CPE_CMD_DL_SEGMENT);
  1443. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1444. return rc;
  1445. }
  1446. cpe_toggle_irq_notification(t_info, false);
  1447. t_info->state = CPE_STATE_DOWNLOADING;
  1448. t_info->substate = CPE_SS_DL_DOWNLOADING;
  1449. rc = t_info->tgt->tgt_write_ram(t_info, segment);
  1450. cpe_toggle_irq_notification(t_info, true);
  1451. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1452. return rc;
  1453. }
  1454. enum cpe_svc_result cpe_svc_boot(void *cpe_handle, int debug_mode)
  1455. {
  1456. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1457. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1458. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1459. if (!t_info)
  1460. t_info = cpe_d.cpe_default_handle;
  1461. rc = cpe_is_command_valid(t_info, CPE_CMD_BOOT);
  1462. if (rc != CPE_SVC_SUCCESS) {
  1463. pr_err("%s: cmd validation fail, cmd = %d\n",
  1464. __func__, CPE_CMD_BOOT);
  1465. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1466. return rc;
  1467. }
  1468. if (rc == CPE_SVC_SUCCESS) {
  1469. t_info->tgt->tgt_boot(debug_mode);
  1470. t_info->state = CPE_STATE_BOOTING;
  1471. t_info->substate = CPE_SS_BOOT;
  1472. pr_debug("%s: cpe service booting\n",
  1473. __func__);
  1474. }
  1475. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1476. return rc;
  1477. }
  1478. enum cpe_svc_result cpe_svc_process_irq(void *cpe_handle, u32 cpe_irq)
  1479. {
  1480. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1481. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1482. if (!t_info)
  1483. t_info = cpe_d.cpe_default_handle;
  1484. cpe_toggle_irq_notification(t_info, false);
  1485. cpe_process_irq_int(cpe_irq, t_info);
  1486. cpe_toggle_irq_notification(t_info, true);
  1487. return rc;
  1488. }
  1489. enum cpe_svc_result cpe_svc_route_notification(void *cpe_handle,
  1490. enum cpe_svc_module module, enum cpe_svc_route_dest dest)
  1491. {
  1492. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1493. enum cpe_svc_result rc = CPE_SVC_NOT_READY;
  1494. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1495. if (!t_info)
  1496. t_info = cpe_d.cpe_default_handle;
  1497. if (t_info->tgt)
  1498. rc = t_info->tgt->tgt_route_notification(module, dest);
  1499. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1500. return rc;
  1501. }
/*
 * __cpe_svc_shutdown - take the CPE service offline (caller holds lock)
 * @cpe_handle: instance to shut down (NULL selects the default handle)
 *
 * Drains the pending command queue, failing queued SEND_MSG commands
 * with CPE_SVC_SHUTTING_DOWN, fails the in-flight message (if any),
 * stops the worker thread and finally issues CPE_CMD_KILL_THREAD so
 * clients receive the OFFLINE broadcast.
 *
 * Called with cpe_api_mutex held: by cpe_svc_shutdown() and from the
 * watchdog path in cpe_process_irq_int().
 */
static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
	struct cpe_command_node *n = NULL;
	struct cpe_command_node kill_cmd;

	if (!t_info)
		t_info = cpe_d.cpe_default_handle;

	rc = cpe_is_command_valid(t_info, CPE_CMD_SHUTDOWN);
	if (rc != CPE_SVC_SUCCESS) {
		pr_err("%s: cmd validation fail, cmd = %d\n",
			__func__, CPE_CMD_SHUTDOWN);
		return rc;
	}

	/* drain the queue, failing each queued message back to its owner */
	while (!list_empty(&t_info->main_queue)) {
		n = list_first_entry(&t_info->main_queue,
				     struct cpe_command_node, list);

		if (n->command == CPE_CMD_SEND_MSG) {
			cpe_notify_cmi_client(t_info, (u8 *)n->data,
				CPE_SVC_SHUTTING_DOWN);
		}
		/*
		 * Since command cannot be processed,
		 * delete it from the list and perform cleanup
		 */
		list_del(&n->list);
		cpe_command_cleanup(n);
		kfree(n);
	}

	pr_debug("%s: cpe service OFFLINE state\n", __func__);

	t_info->state = CPE_STATE_OFFLINE;
	t_info->substate = CPE_SS_IDLE;

	memset(&kill_cmd, 0, sizeof(kill_cmd));
	kill_cmd.command = CPE_CMD_KILL_THREAD;

	/* fail the in-flight message, if any */
	if (t_info->pending) {
		struct cpe_send_msg *m =
			(struct cpe_send_msg *)t_info->pending;
		cpe_notify_cmi_client(t_info, m->payload,
			CPE_SVC_SHUTTING_DOWN);
		kfree(t_info->pending);
		t_info->pending = NULL;
	}

	cpe_cleanup_worker_thread(t_info);
	/* broadcasts CPE_SVC_OFFLINE via cpe_process_kill_thread */
	t_info->cpe_process_command(&kill_cmd);

	return rc;
}
  1548. enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
  1549. {
  1550. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1551. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1552. rc = __cpe_svc_shutdown(cpe_handle);
  1553. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1554. return rc;
  1555. }
  1556. enum cpe_svc_result cpe_svc_reset(void *cpe_handle)
  1557. {
  1558. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1559. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1560. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1561. if (!t_info)
  1562. t_info = cpe_d.cpe_default_handle;
  1563. rc = cpe_is_command_valid(t_info, CPE_CMD_RESET);
  1564. if (rc != CPE_SVC_SUCCESS) {
  1565. pr_err("%s: cmd validation fail, cmd = %d\n",
  1566. __func__, CPE_CMD_RESET);
  1567. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1568. return rc;
  1569. }
  1570. if (t_info && t_info->tgt) {
  1571. rc = t_info->tgt->tgt_reset();
  1572. pr_debug("%s: cpe services in INITIALIZED state\n",
  1573. __func__);
  1574. t_info->state = CPE_STATE_INITIALIZED;
  1575. t_info->substate = CPE_SS_IDLE;
  1576. }
  1577. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1578. return rc;
  1579. }
  1580. enum cpe_svc_result cpe_svc_ramdump(void *cpe_handle,
  1581. struct cpe_svc_mem_segment *buffer)
  1582. {
  1583. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1584. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1585. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1586. if (!t_info)
  1587. t_info = cpe_d.cpe_default_handle;
  1588. rc = cpe_is_command_valid(t_info, CPE_CMD_RAMDUMP);
  1589. if (rc != CPE_SVC_SUCCESS) {
  1590. pr_err("%s: cmd validation fail, cmd = %d\n",
  1591. __func__, CPE_CMD_RAMDUMP);
  1592. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1593. return rc;
  1594. }
  1595. if (t_info->tgt) {
  1596. rc = t_info->tgt->tgt_read_ram(t_info, buffer);
  1597. } else {
  1598. pr_err("%s: cpe service not ready\n", __func__);
  1599. rc = CPE_SVC_NOT_READY;
  1600. }
  1601. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1602. return rc;
  1603. }
  1604. enum cpe_svc_result cpe_svc_set_debug_mode(void *cpe_handle, u32 mode)
  1605. {
  1606. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1607. enum cpe_svc_result rc = CPE_SVC_INVALID_HANDLE;
  1608. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1609. if (!t_info)
  1610. t_info = cpe_d.cpe_default_handle;
  1611. if (t_info->tgt)
  1612. rc = t_info->tgt->tgt_set_debug_mode(mode);
  1613. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1614. return rc;
  1615. }
  1616. const struct cpe_svc_hw_cfg *cpe_svc_get_hw_cfg(void *cpe_handle)
  1617. {
  1618. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1619. if (!t_info)
  1620. t_info = cpe_d.cpe_default_handle;
  1621. if (t_info->tgt)
  1622. return t_info->tgt->tgt_get_cpe_info();
  1623. return NULL;
  1624. }
  1625. void *cmi_register(
  1626. void notification_callback(
  1627. const struct cmi_api_notification *parameter),
  1628. u32 service)
  1629. {
  1630. void *reg_handle = NULL;
  1631. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1632. reg_handle = cpe_register_generic(cpe_d.cpe_default_handle,
  1633. NULL,
  1634. notification_callback,
  1635. (CPE_SVC_CMI_MSG | CPE_SVC_OFFLINE |
  1636. CPE_SVC_ONLINE),
  1637. service,
  1638. "CMI_CLIENT");
  1639. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1640. return reg_handle;
  1641. }
  1642. enum cmi_api_result cmi_deregister(void *reg_handle)
  1643. {
  1644. u32 clients = 0;
  1645. struct cpe_notif_node *n = NULL;
  1646. enum cmi_api_result rc = CMI_API_SUCCESS;
  1647. struct cpe_svc_notification payload;
  1648. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1649. rc = (enum cmi_api_result) cpe_deregister_generic(
  1650. cpe_d.cpe_default_handle, reg_handle);
  1651. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  1652. list_for_each_entry(n, &cpe_d.cpe_default_handle->client_list, list) {
  1653. if (n->mask & CPE_SVC_CMI_MSG)
  1654. clients++;
  1655. }
  1656. CPE_SVC_REL_LOCK(&cpe_d.cpe_svc_lock, "cpe_svc");
  1657. if (clients == 0) {
  1658. payload.event = CPE_SVC_CMI_CLIENTS_DEREG;
  1659. payload.payload = NULL;
  1660. payload.result = CPE_SVC_SUCCESS;
  1661. cpe_broadcast_notification(cpe_d.cpe_default_handle, &payload);
  1662. }
  1663. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1664. return rc;
  1665. }
  1666. enum cmi_api_result cmi_send_msg(void *message)
  1667. {
  1668. enum cmi_api_result rc = CMI_API_SUCCESS;
  1669. struct cpe_send_msg *msg = NULL;
  1670. struct cmi_hdr *hdr;
  1671. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1672. hdr = CMI_GET_HEADER(message);
  1673. msg = kzalloc(sizeof(struct cpe_send_msg),
  1674. GFP_ATOMIC);
  1675. if (!msg) {
  1676. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1677. return CPE_SVC_NO_MEMORY;
  1678. }
  1679. if (CMI_HDR_GET_OBM_FLAG(hdr) == CMI_OBM_FLAG_OUT_BAND)
  1680. msg->isobm = 1;
  1681. else
  1682. msg->isobm = 0;
  1683. msg->size = sizeof(struct cmi_hdr) +
  1684. CMI_HDR_GET_PAYLOAD_SIZE(hdr);
  1685. msg->payload = kzalloc(msg->size, GFP_ATOMIC);
  1686. if (!msg->payload) {
  1687. kfree(msg);
  1688. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1689. return CPE_SVC_NO_MEMORY;
  1690. }
  1691. msg->address = 0;
  1692. memcpy((void *)msg->payload, message, msg->size);
  1693. rc = (enum cmi_api_result) cpe_send_cmd_to_thread(
  1694. cpe_d.cpe_default_handle,
  1695. CPE_CMD_SEND_MSG,
  1696. (void *)msg, false);
  1697. if (rc != 0) {
  1698. pr_err("%s: Failed to queue message\n", __func__);
  1699. kfree(msg->payload);
  1700. kfree(msg);
  1701. }
  1702. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1703. return rc;
  1704. }
  1705. enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status)
  1706. {
  1707. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  1708. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1709. struct cpe_svc_mem_segment backup_seg;
  1710. struct cpe_svc_mem_segment waiti_seg;
  1711. u8 *backup_data = NULL;
  1712. CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1713. if (!t_info)
  1714. t_info = cpe_d.cpe_default_handle;
  1715. rc = cpe_is_command_valid(t_info, CPE_CMD_FTM_TEST);
  1716. if (rc != CPE_SVC_SUCCESS) {
  1717. pr_err("%s: cmd validation fail, cmd = %d\n",
  1718. __func__, CPE_CMD_FTM_TEST);
  1719. goto fail_cmd;
  1720. }
  1721. if (t_info && t_info->tgt) {
  1722. backup_data = kzalloc(
  1723. t_info->tgt->tgt_waiti_info->tgt_waiti_size,
  1724. GFP_KERNEL);
  1725. /* CPE reset */
  1726. rc = t_info->tgt->tgt_reset();
  1727. if (rc != CPE_SVC_SUCCESS) {
  1728. pr_err("%s: CPE reset fail! err = %d\n",
  1729. __func__, rc);
  1730. goto err_return;
  1731. }
  1732. /* Back up the 4 byte IRAM data first */
  1733. backup_seg.type = CPE_SVC_INSTRUCTION_MEM;
  1734. backup_seg.cpe_addr =
  1735. t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
  1736. backup_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
  1737. backup_seg.data = backup_data;
  1738. pr_debug("%s: Backing up IRAM data from CPE\n",
  1739. __func__);
  1740. rc = t_info->tgt->tgt_read_ram(t_info, &backup_seg);
  1741. if (rc != CPE_SVC_SUCCESS) {
  1742. pr_err("%s: Fail to backup CPE IRAM data, err = %d\n",
  1743. __func__, rc);
  1744. goto err_return;
  1745. }
  1746. pr_debug("%s: Complete backing up IRAM data from CPE\n",
  1747. __func__);
  1748. /* Write the WAITI instruction data */
  1749. waiti_seg.type = CPE_SVC_INSTRUCTION_MEM;
  1750. waiti_seg.cpe_addr =
  1751. t_info->tgt->tgt_get_cpe_info()->IRAM_offset;
  1752. waiti_seg.size = t_info->tgt->tgt_waiti_info->tgt_waiti_size;
  1753. waiti_seg.data = t_info->tgt->tgt_waiti_info->tgt_waiti_data;
  1754. rc = t_info->tgt->tgt_write_ram(t_info, &waiti_seg);
  1755. if (rc != CPE_SVC_SUCCESS) {
  1756. pr_err("%s: Fail to write the WAITI data, err = %d\n",
  1757. __func__, rc);
  1758. goto restore_iram;
  1759. }
  1760. /* Boot up cpe to execute the WAITI instructions */
  1761. rc = t_info->tgt->tgt_boot(1);
  1762. if (rc != CPE_SVC_SUCCESS) {
  1763. pr_err("%s: Fail to boot CPE, err = %d\n",
  1764. __func__, rc);
  1765. goto reset;
  1766. }
  1767. /*
  1768. * 1ms delay is suggested by the hw team to
  1769. * wait for cpe to boot up.
  1770. */
  1771. usleep_range(1000, 1100);
  1772. /* Check if the cpe init is done after executing the WAITI */
  1773. *status = t_info->tgt->tgt_cpar_init_done();
  1774. reset:
  1775. /* Set the cpe back to reset state */
  1776. rc = t_info->tgt->tgt_reset();
  1777. if (rc != CPE_SVC_SUCCESS) {
  1778. pr_err("%s: CPE reset fail! err = %d\n",
  1779. __func__, rc);
  1780. goto restore_iram;
  1781. }
  1782. restore_iram:
  1783. /* Restore the IRAM 4 bytes data */
  1784. rc = t_info->tgt->tgt_write_ram(t_info, &backup_seg);
  1785. if (rc != CPE_SVC_SUCCESS) {
  1786. pr_err("%s: Fail to restore the IRAM data, err = %d\n",
  1787. __func__, rc);
  1788. goto err_return;
  1789. }
  1790. }
  1791. err_return:
  1792. kfree(backup_data);
  1793. fail_cmd:
  1794. CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
  1795. return rc;
  1796. }
/* TomTom target: boot is a no-op at this layer; always succeeds. */
static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: CPAR init status not exposed; always reports 0 (not done). */
static u32 cpe_tgt_tomtom_is_cpar_init_done(void)
{
	return 0;
}
/* TomTom target: activity status not exposed; always reports 0 (inactive). */
static u32 cpe_tgt_tomtom_is_active(void)
{
	return 0;
}
/* TomTom target: reset is a no-op at this layer; always succeeds. */
static enum cpe_svc_result cpe_tgt_tomtom_reset(void)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: voice-TX LAB toggle is a no-op at this layer. */
enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable)
{
	return CPE_SVC_SUCCESS;
}
  1817. enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable)
  1818. {
  1819. struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
  1820. if (!t_info)
  1821. t_info = cpe_d.cpe_default_handle;
  1822. if (t_info->tgt)
  1823. return t_info->tgt->tgt_voice_tx_lab(enable);
  1824. else
  1825. return CPE_SVC_INVALID_HANDLE;
  1826. }
/* TomTom target: mailbox read is a no-op at this layer; buffer untouched. */
static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer,
	size_t size)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: mailbox write is a no-op at this layer. */
static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer,
	size_t size)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: RAM read is a no-op at this layer; mem_seg untouched. */
static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info,
		struct cpe_svc_mem_segment *mem_seg)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: RAM write is a no-op at this layer. */
static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info,
		const struct cpe_svc_mem_segment *mem_seg)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: notification routing is a no-op at this layer. */
static enum cpe_svc_result cpe_tgt_tomtom_route_notification(
		enum cpe_svc_module module,
		enum cpe_svc_route_dest dest)
{
	return CPE_SVC_SUCCESS;
}
/* TomTom target: debug mode setting is a no-op at this layer. */
static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable)
{
	return CPE_SVC_SUCCESS;
}
/* Return the static TomTom hardware configuration table. */
static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void)
{
	return &cpe_svc_tomtom_info;
}
  1861. static enum cpe_svc_result cpe_tgt_tomtom_deinit(
  1862. struct cpe_svc_tgt_abstraction *param)
  1863. {
  1864. kfree(param->inbox);
  1865. param->inbox = NULL;
  1866. kfree(param->outbox);
  1867. param->outbox = NULL;
  1868. memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
  1869. return CPE_SVC_SUCCESS;
  1870. }
/*
 * 4 bytes written into CPE IRAM for the FTM test; presumably the
 * encoding of a WAITI (wait-for-interrupt) instruction — TODO confirm
 * against the CPE core ISA reference.
 */
static u8 cpe_tgt_tomtom_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

/* Size/data pair exposed to the generic layer via tgt_waiti_info */
static struct cpe_tgt_waiti_info cpe_tgt_tomtom_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_tomtom_waiti_data),
	.tgt_waiti_data = cpe_tgt_tomtom_waiti_data,
};
  1876. static enum cpe_svc_result cpe_tgt_tomtom_init(
  1877. struct cpe_svc_codec_info_v1 *codec_info,
  1878. struct cpe_svc_tgt_abstraction *param)
  1879. {
  1880. if (!codec_info)
  1881. return CPE_SVC_INVALID_HANDLE;
  1882. if (!param)
  1883. return CPE_SVC_INVALID_HANDLE;
  1884. if (codec_info->id == CPE_SVC_CODEC_TOMTOM) {
  1885. param->tgt_boot = cpe_tgt_tomtom_boot;
  1886. param->tgt_cpar_init_done = cpe_tgt_tomtom_is_cpar_init_done;
  1887. param->tgt_is_active = cpe_tgt_tomtom_is_active;
  1888. param->tgt_reset = cpe_tgt_tomtom_reset;
  1889. param->tgt_read_mailbox = cpe_tgt_tomtom_read_mailbox;
  1890. param->tgt_write_mailbox = cpe_tgt_tomtom_write_mailbox;
  1891. param->tgt_read_ram = cpe_tgt_tomtom_read_RAM;
  1892. param->tgt_write_ram = cpe_tgt_tomtom_write_RAM;
  1893. param->tgt_route_notification =
  1894. cpe_tgt_tomtom_route_notification;
  1895. param->tgt_set_debug_mode = cpe_tgt_tomtom_set_debug_mode;
  1896. param->tgt_get_cpe_info = cpe_tgt_tomtom_get_cpe_info;
  1897. param->tgt_deinit = cpe_tgt_tomtom_deinit;
  1898. param->tgt_voice_tx_lab = cpe_tgt_tomtom_voicetx;
  1899. param->tgt_waiti_info = &cpe_tgt_tomtom_waiti_info;
  1900. param->inbox = kzalloc(TOMTOM_A_SVASS_SPE_INBOX_SIZE,
  1901. GFP_KERNEL);
  1902. if (!param->inbox)
  1903. return CPE_SVC_NO_MEMORY;
  1904. param->outbox = kzalloc(TOMTOM_A_SVASS_SPE_OUTBOX_SIZE,
  1905. GFP_KERNEL);
  1906. if (!param->outbox) {
  1907. kfree(param->inbox);
  1908. return CPE_SVC_NO_MEMORY;
  1909. }
  1910. }
  1911. return CPE_SVC_SUCCESS;
  1912. }
/*
 * Boot the WCD9335 CPE core.
 *
 * In normal mode the watchdog is armed first; in debug mode it stays
 * disabled so a halted core is not reset underneath a debugger. The
 * CPAR_CTL writes then take the core out of reset in sequence
 * (order-sensitive register programming). Errors are OR-accumulated
 * and collapsed into CPE_SVC_FAILED.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_boot(int debug_mode)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	if (!debug_mode)
		rc |= cpe_update_bits(
				WCD9335_CPE_SS_WDOG_CFG,
				0x3f, 0x31);
	else
		pr_info("%s: CPE in debug mode, WDOG disabled\n",
			__func__);

	rc |= cpe_register_write(WCD9335_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD, 19);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x00);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x02, 0x02);
	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x01, 0x01);

	if (unlikely(rc)) {
		pr_err("%s: Failed to boot, err = %d\n",
			__func__, rc);
		rc = CPE_SVC_FAILED;
	}
	return rc;
}
/* Report CPAR init completion: bit 0 of the CPE_SS status register. */
static u32 cpe_tgt_wcd9335_is_cpar_init_done(void)
{
	u8 temp = 0;

	cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
	return temp & 0x1;
}
/* Report CPE activity: bit 2 of the CPE_SS status register. */
static u32 cpe_tgt_wcd9335_is_active(void)
{
	u8 temp = 0;

	cpe_register_read(WCD9335_CPE_SS_STATUS, &temp);
	return temp & 0x4;
}
/*
 * Put the WCD9335 CPE back into reset: clear the CPAR enable bit,
 * power the IRAM/DRAM banks back up (shutdown registers written to 0),
 * then assert the CPAR reset bit. Errors from the individual register
 * accesses are OR-accumulated and collapsed into CPE_SVC_FAILED.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_reset(void)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CFG, 0x01, 0x00);

	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_IRAM_SHUTDOWN, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM1_SHUTDOWN, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_1, 0x00);
	rc |= cpe_register_write(
		WCD9335_CODEC_RPM_PWR_CPE_DRAM0_SHUTDOWN_2, 0x00);

	rc |= cpe_update_bits(WCD9335_CPE_SS_CPAR_CTL, 0x04, 0x04);

	if (unlikely(rc)) {
		pr_err("%s: failed to reset cpe, err = %d\n",
			__func__, rc);
		rc = CPE_SVC_FAILED;
	}
	return rc;
}
/*
 * Read up to WCD9335_CPE_SS_SPE_OUTBOX_SIZE bytes from the SPE outbox
 * registers into buffer, then acknowledge the outbox so the CPE can
 * post the next message.
 *
 * NOTE(review): rc from the read loop is overwritten by the ACK write
 * below, so read failures are silently masked unless the ACK itself
 * fails — confirm whether that is intentional before changing.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_read_mailbox(u8 *buffer,
		size_t size)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	u32 cnt = 0;

	pr_debug("%s: size=%zd\n", __func__, size);
	/* Clamp to the hardware outbox size */
	if (size > WCD9335_CPE_SS_SPE_OUTBOX_SIZE)
		size = WCD9335_CPE_SS_SPE_OUTBOX_SIZE;

	/* Stop early on the first failed register read */
	for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++)
		rc = cpe_register_read(WCD9335_CPE_SS_SPE_OUTBOX1(cnt),
				       &buffer[cnt]);

	rc = cpe_register_write(WCD9335_CPE_SS_OUTBOX1_ACK, 0x01);

	if (unlikely(rc)) {
		pr_err("%s: failed to ACK outbox, err = %d\n",
			__func__, rc);
		rc = CPE_SVC_FAILED;
	}
	return rc;
}
/*
 * Write up to WCD9335_CPE_SS_SPE_INBOX_SIZE bytes into the SPE inbox
 * registers, then trigger the inbox interrupt so the CPE consumes the
 * message. Returns the accumulated write status, or the trigger-write
 * status if all payload writes succeeded.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_write_mailbox(u8 *buffer,
	size_t size)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	u32 cnt = 0;

	pr_debug("%s: size = %zd\n", __func__, size);
	/* Clamp to the hardware inbox size */
	if (size > WCD9335_CPE_SS_SPE_INBOX_SIZE)
		size = WCD9335_CPE_SS_SPE_INBOX_SIZE;
	/* OR-accumulate errors; the loop condition stops on first failure */
	for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) {
		rc |= cpe_register_write(WCD9335_CPE_SS_SPE_INBOX1(cnt),
			buffer[cnt]);
	}

	if (unlikely(rc)) {
		pr_err("%s: Error %d writing mailbox registers\n",
			__func__, rc);
		return rc;
	}

	/* Notify the CPE that a new message is available */
	rc = cpe_register_write(WCD9335_CPE_SS_INBOX1_TRG, 1);
	return rc;
}
  2005. static enum cpe_svc_result cpe_wcd9335_get_mem_addr(struct cpe_info *t_info,
  2006. const struct cpe_svc_mem_segment *mem_seg,
  2007. u32 *addr, u8 *mem)
  2008. {
  2009. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2010. u32 offset, mem_sz, address;
  2011. u8 mem_type;
  2012. switch (mem_seg->type) {
  2013. case CPE_SVC_DATA_MEM:
  2014. mem_type = MEM_ACCESS_DRAM_VAL;
  2015. offset = WCD9335_CPE_SS_SPE_DRAM_OFFSET;
  2016. mem_sz = WCD9335_CPE_SS_SPE_DRAM_SIZE;
  2017. break;
  2018. case CPE_SVC_INSTRUCTION_MEM:
  2019. mem_type = MEM_ACCESS_IRAM_VAL;
  2020. offset = WCD9335_CPE_SS_SPE_IRAM_OFFSET;
  2021. mem_sz = WCD9335_CPE_SS_SPE_IRAM_SIZE;
  2022. break;
  2023. default:
  2024. pr_err("%s: Invalid mem type = %u\n",
  2025. __func__, mem_seg->type);
  2026. return CPE_SVC_INVALID_HANDLE;
  2027. }
  2028. if (mem_seg->cpe_addr < offset) {
  2029. pr_err("%s: Invalid addr %x for mem type %u\n",
  2030. __func__, mem_seg->cpe_addr, mem_type);
  2031. return CPE_SVC_INVALID_HANDLE;
  2032. }
  2033. address = mem_seg->cpe_addr - offset;
  2034. if (address + mem_seg->size > mem_sz) {
  2035. pr_err("%s: wrong size %zu, start address %x, mem_type %u\n",
  2036. __func__, mem_seg->size, address, mem_type);
  2037. return CPE_SVC_INVALID_HANDLE;
  2038. }
  2039. (*addr) = address;
  2040. (*mem) = mem_type;
  2041. return rc;
  2042. }
  2043. static enum cpe_svc_result cpe_tgt_wcd9335_read_RAM(struct cpe_info *t_info,
  2044. struct cpe_svc_mem_segment *mem_seg)
  2045. {
  2046. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2047. u8 temp = 0;
  2048. u32 cnt = 0;
  2049. u8 mem = 0x0;
  2050. u32 addr = 0;
  2051. u32 lastaddr = 0;
  2052. u32 ptr_update = true;
  2053. bool autoinc;
  2054. if (!mem_seg) {
  2055. pr_err("%s: Invalid buffer\n", __func__);
  2056. return CPE_SVC_INVALID_HANDLE;
  2057. }
  2058. rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
  2059. if (rc != CPE_SVC_SUCCESS) {
  2060. pr_err("%s: Cannot obtain address, mem_type %u\n",
  2061. __func__, mem_seg->type);
  2062. return rc;
  2063. }
  2064. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2065. autoinc = cpe_register_read_autoinc_supported();
  2066. if (autoinc)
  2067. temp = 0x18;
  2068. else
  2069. temp = 0x10;
  2070. temp |= mem;
  2071. lastaddr = ~addr;
  2072. do {
  2073. if (!autoinc || (ptr_update)) {
  2074. /* write LSB only if modified */
  2075. if ((lastaddr & 0xFF) != (addr & 0xFF))
  2076. rc |= cpe_register_write(
  2077. WCD9335_CPE_SS_MEM_PTR_0,
  2078. (addr & 0xFF));
  2079. /* write middle byte only if modified */
  2080. if (((lastaddr >> 8) & 0xFF) != ((addr >> 8) & 0xFF))
  2081. rc |= cpe_register_write(
  2082. WCD9335_CPE_SS_MEM_PTR_1,
  2083. ((addr>>8) & 0xFF));
  2084. /* write MSB only if modified */
  2085. if (((lastaddr >> 16) & 0xFF) != ((addr >> 16) & 0xFF))
  2086. rc |= cpe_register_write(
  2087. WCD9335_CPE_SS_MEM_PTR_2,
  2088. ((addr>>16) & 0xFF));
  2089. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, temp);
  2090. lastaddr = addr;
  2091. addr++;
  2092. ptr_update = false;
  2093. }
  2094. rc |= cpe_register_read(WCD9335_CPE_SS_MEM_BANK_0,
  2095. &mem_seg->data[cnt]);
  2096. if (!autoinc)
  2097. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2098. } while ((++cnt < mem_seg->size) ||
  2099. (rc != CPE_SVC_SUCCESS));
  2100. rc |= cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2101. if (rc)
  2102. pr_err("%s: Failed to read registers, err = %d\n",
  2103. __func__, rc);
  2104. return rc;
  2105. }
  2106. static enum cpe_svc_result cpe_tgt_wcd9335_write_RAM(struct cpe_info *t_info,
  2107. const struct cpe_svc_mem_segment *mem_seg)
  2108. {
  2109. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2110. u8 mem_reg_val = 0;
  2111. u8 mem = MEM_ACCESS_NONE_VAL;
  2112. u32 addr = 0;
  2113. u8 *temp_ptr = NULL;
  2114. u32 temp_size = 0;
  2115. bool autoinc;
  2116. if (!mem_seg) {
  2117. pr_err("%s: Invalid mem segment\n",
  2118. __func__);
  2119. return CPE_SVC_INVALID_HANDLE;
  2120. }
  2121. rc = cpe_wcd9335_get_mem_addr(t_info, mem_seg, &addr, &mem);
  2122. if (rc != CPE_SVC_SUCCESS) {
  2123. pr_err("%s: Cannot obtain address, mem_type %u\n",
  2124. __func__, mem_seg->type);
  2125. return rc;
  2126. }
  2127. autoinc = cpe_register_read_autoinc_supported();
  2128. if (autoinc)
  2129. mem_reg_val = 0x18;
  2130. else
  2131. mem_reg_val = 0x10;
  2132. mem_reg_val |= mem;
  2133. rc = cpe_update_bits(WCD9335_CPE_SS_MEM_CTRL,
  2134. 0x0F, mem_reg_val);
  2135. rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_0,
  2136. (addr & 0xFF));
  2137. rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_1,
  2138. ((addr >> 8) & 0xFF));
  2139. rc = cpe_register_write(WCD9335_CPE_SS_MEM_PTR_2,
  2140. ((addr >> 16) & 0xFF));
  2141. temp_size = 0;
  2142. temp_ptr = mem_seg->data;
  2143. while (temp_size <= mem_seg->size) {
  2144. u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE)
  2145. ? CHUNK_SIZE : (mem_seg->size - temp_size);
  2146. if (t_info->state == CPE_STATE_OFFLINE) {
  2147. pr_err("%s: CPE is offline\n", __func__);
  2148. return CPE_SVC_FAILED;
  2149. }
  2150. cpe_register_write_repeat(WCD9335_CPE_SS_MEM_BANK_0,
  2151. temp_ptr, to_write);
  2152. temp_size += CHUNK_SIZE;
  2153. temp_ptr += CHUNK_SIZE;
  2154. }
  2155. rc = cpe_register_write(WCD9335_CPE_SS_MEM_CTRL, 0);
  2156. if (rc)
  2157. pr_err("%s: Failed to write registers, err = %d\n",
  2158. __func__, rc);
  2159. return rc;
  2160. }
  2161. static enum cpe_svc_result cpe_tgt_wcd9335_route_notification(
  2162. enum cpe_svc_module module,
  2163. enum cpe_svc_route_dest dest)
  2164. {
  2165. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2166. pr_debug("%s: Module = %d, Destination = %d\n",
  2167. __func__, module, dest);
  2168. switch (module) {
  2169. case CPE_SVC_LISTEN_PROC:
  2170. switch (dest) {
  2171. case CPE_SVC_EXTERNAL:
  2172. rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x01);
  2173. break;
  2174. case CPE_SVC_INTERNAL:
  2175. rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x01, 0x00);
  2176. break;
  2177. default:
  2178. pr_err("%s: Invalid destination %d\n",
  2179. __func__, dest);
  2180. return CPE_SVC_FAILED;
  2181. }
  2182. break;
  2183. default:
  2184. pr_err("%s: Invalid module %d\n",
  2185. __func__, module);
  2186. rc = CPE_SVC_FAILED;
  2187. break;
  2188. }
  2189. return rc;
  2190. }
/*
 * Debug-mode hook for WCD9335: currently only logs the request; the
 * `enable` flag has no hardware effect here.
 */
static enum cpe_svc_result cpe_tgt_wcd9335_set_debug_mode(u32 enable)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	pr_debug("%s: enable = %s\n", __func__,
		 (enable) ? "true" : "false");
	return rc;
}
/* Return the static WCD9335 hardware configuration table. */
static const struct cpe_svc_hw_cfg *cpe_tgt_wcd9335_get_cpe_info(void)
{
	return &cpe_svc_wcd9335_info;
}
  2202. static enum cpe_svc_result
  2203. cpe_tgt_wcd9335_deinit(struct cpe_svc_tgt_abstraction *param)
  2204. {
  2205. kfree(param->inbox);
  2206. param->inbox = NULL;
  2207. kfree(param->outbox);
  2208. param->outbox = NULL;
  2209. memset(param, 0, sizeof(struct cpe_svc_tgt_abstraction));
  2210. return CPE_SVC_SUCCESS;
  2211. }
  2212. static enum cpe_svc_result
  2213. cpe_tgt_wcd9335_voicetx(bool enable)
  2214. {
  2215. enum cpe_svc_result rc = CPE_SVC_SUCCESS;
  2216. u8 val = 0;
  2217. pr_debug("%s: enable = %u\n", __func__, enable);
  2218. if (enable)
  2219. val = 0x02;
  2220. else
  2221. val = 0x00;
  2222. rc = cpe_update_bits(WCD9335_CPE_SS_CFG, 0x02, val);
  2223. val = 0;
  2224. cpe_register_read(WCD9335_CPE_SS_CFG, &val);
  2225. return rc;
  2226. }
/*
 * 4 bytes written into CPE IRAM for the FTM test; presumably the
 * encoding of a WAITI (wait-for-interrupt) instruction — TODO confirm
 * against the CPE core ISA reference.
 */
static u8 cpe_tgt_wcd9335_waiti_data[] = {0x00, 0x70, 0x00, 0x00};

/* Size/data pair exposed to the generic layer via tgt_waiti_info */
static struct cpe_tgt_waiti_info cpe_tgt_wcd9335_waiti_info = {
	.tgt_waiti_size = ARRAY_SIZE(cpe_tgt_wcd9335_waiti_data),
	.tgt_waiti_data = cpe_tgt_wcd9335_waiti_data,
};
  2232. static enum cpe_svc_result cpe_tgt_wcd9335_init(
  2233. struct cpe_svc_codec_info_v1 *codec_info,
  2234. struct cpe_svc_tgt_abstraction *param)
  2235. {
  2236. if (!codec_info)
  2237. return CPE_SVC_INVALID_HANDLE;
  2238. if (!param)
  2239. return CPE_SVC_INVALID_HANDLE;
  2240. if (codec_info->id == CPE_SVC_CODEC_WCD9335) {
  2241. param->tgt_boot = cpe_tgt_wcd9335_boot;
  2242. param->tgt_cpar_init_done = cpe_tgt_wcd9335_is_cpar_init_done;
  2243. param->tgt_is_active = cpe_tgt_wcd9335_is_active;
  2244. param->tgt_reset = cpe_tgt_wcd9335_reset;
  2245. param->tgt_read_mailbox = cpe_tgt_wcd9335_read_mailbox;
  2246. param->tgt_write_mailbox = cpe_tgt_wcd9335_write_mailbox;
  2247. param->tgt_read_ram = cpe_tgt_wcd9335_read_RAM;
  2248. param->tgt_write_ram = cpe_tgt_wcd9335_write_RAM;
  2249. param->tgt_route_notification =
  2250. cpe_tgt_wcd9335_route_notification;
  2251. param->tgt_set_debug_mode = cpe_tgt_wcd9335_set_debug_mode;
  2252. param->tgt_get_cpe_info = cpe_tgt_wcd9335_get_cpe_info;
  2253. param->tgt_deinit = cpe_tgt_wcd9335_deinit;
  2254. param->tgt_voice_tx_lab = cpe_tgt_wcd9335_voicetx;
  2255. param->tgt_waiti_info = &cpe_tgt_wcd9335_waiti_info;
  2256. param->inbox = kzalloc(WCD9335_CPE_SS_SPE_INBOX_SIZE,
  2257. GFP_KERNEL);
  2258. if (!param->inbox)
  2259. return CPE_SVC_NO_MEMORY;
  2260. param->outbox = kzalloc(WCD9335_CPE_SS_SPE_OUTBOX_SIZE,
  2261. GFP_KERNEL);
  2262. if (!param->outbox) {
  2263. kfree(param->inbox);
  2264. return CPE_SVC_NO_MEMORY;
  2265. }
  2266. }
  2267. return CPE_SVC_SUCCESS;
  2268. }
  2269. MODULE_DESCRIPTION("WCD CPE Services");
  2270. MODULE_LICENSE("GPL v2");