rpm-smd.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/rbtree.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/types.h>

#include <soc/qcom/rpm-smd.h>
#include <soc/qcom/mpm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/trace_rpm_smd.h>
#define DEFAULT_BUFFER_SIZE 256
#define DEBUG_PRINT_BUFFER_SIZE 512
#define MAX_SLEEP_BUFFER 128
#define INV_RSC "resource does not exist"
#define ERR "err\0"
#define MAX_ERR_BUFFER_SIZE 128
#define MAX_WAIT_ON_ACK 24
#define INIT_ERROR 1
#define V1_PROTOCOL_VERSION 0x31726576 /* rev1 */
#define V0_PROTOCOL_VERSION 0 /* rev0 */
#define RPM_MSG_TYPE_OFFSET 16
#define RPM_MSG_TYPE_SIZE 8
#define RPM_SET_TYPE_OFFSET 28
#define RPM_SET_TYPE_SIZE 4
#define RPM_REQ_LEN_OFFSET 0
#define RPM_REQ_LEN_SIZE 16
#define RPM_MSG_VERSION_OFFSET 24
#define RPM_MSG_VERSION_SIZE 8
#define RPM_MSG_VERSION 1
#define RPM_MSG_SET_OFFSET 28
#define RPM_MSG_SET_SIZE 4
#define RPM_RSC_ID_OFFSET 16
#define RPM_RSC_ID_SIZE 12
#define RPM_DATA_LEN_OFFSET 0
#define RPM_DATA_LEN_SIZE 16

#define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\
		sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr))

#define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset))
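
/*
 * Iterate over every key/value pair packed after the message header:
 * start at the first KVP and advance until data_len bytes of payload
 * have been consumed.
 */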
#define for_each_kvp(buf, k) \
	for (k = (struct kvp *)get_first_kvp(buf); \
	     ((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf); \
	     k = get_next_kvp(k))
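
/*
 * 32-bit ARM has no native readq; compose the 64-bit version register
 * value from two 32-bit reads, high word first.
 */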
#ifdef CONFIG_ARM
#define readq_relaxed(a) ({ \
		u64 val = readl_relaxed((a) + 4); \
		val <<= 32; \
		val |= readl_relaxed((a)); \
		val; \
	})
#endif

/* Debug Definitions */
enum {
	MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
	MSM_RPM_LOG_REQUEST_RAW = BIT(1),
	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
};

static int msm_rpm_debug_mask;
module_param_named(
	debug_mask, msm_rpm_debug_mask, int, 0644
);

static uint32_t rpm_msg_fmt_ver;

struct msm_rpm_driver_data {
	const char *ch_name;
	uint32_t ch_type;
	struct smd_channel *ch_info;
	struct work_struct work;
	struct completion smd_open;
};

struct qcom_smd_rpm {
	struct rpmsg_endpoint *rpm_channel;
	struct device *dev;
	int irq;
	struct completion ack;
	struct mutex lock;
	int ack_status;
	struct notifier_block genpd_nb;
};

struct qcom_smd_rpm *rpm;
struct qcom_smd_rpm priv_rpm;

static bool standalone;
static int probe_status = -EPROBE_DEFER;

static void msm_rpm_process_ack(uint32_t msg_id, int errno);

enum {
	MSM_RPM_MSG_REQUEST_TYPE = 0,
	MSM_RPM_MSG_TYPE_NR,
};

static const uint32_t msm_rpm_request_service_v1[MSM_RPM_MSG_TYPE_NR] = {
	0x716572, /* 'req\0' */
};

enum {
	RPM_V1_REQUEST_SERVICE,
	RPM_V1_SYSTEMDB_SERVICE,
	RPM_V1_COMMAND_SERVICE,
	RPM_V1_ACK_SERVICE,
	RPM_V1_NACK_SERVICE,
} msm_rpm_request_service_v2;

struct rpm_v0_hdr {
	uint32_t service_type;
	uint32_t request_len;
};

struct rpm_v1_hdr {
	uint32_t request_hdr;
};

struct rpm_message_header_v0 {
	struct rpm_v0_hdr hdr;
	uint32_t msg_id;
	enum msm_rpm_set set;
	uint32_t resource_type;
	uint32_t resource_id;
	uint32_t data_len;
};

struct rpm_message_header_v1 {
	struct rpm_v1_hdr hdr;
	uint32_t msg_id;
	uint32_t resource_type;
	uint32_t request_details;
};

struct msm_rpm_ack_msg_v0 {
	uint32_t req;
	uint32_t req_len;
	uint32_t rsc_id;
	uint32_t msg_len;
	uint32_t id_ack;
};

struct msm_rpm_ack_msg_v1 {
	uint32_t request_hdr;
	uint32_t id_ack;
};

struct kvp {
	unsigned int k;
	unsigned int s;
};

struct msm_rpm_kvp_data {
	uint32_t key;
	uint32_t nbytes; /* number of bytes */
	uint8_t *value;
	bool valid;
};

struct slp_buf {
	struct rb_node node;
	char ubuf[MAX_SLEEP_BUFFER];
	char *buf;
	bool valid;
};

enum rpm_msg_fmts {
	RPM_MSG_V0_FMT,
	RPM_MSG_V1_FMT
};

static struct rb_root tr_root = RB_ROOT;
static uint32_t msm_rpm_get_next_msg_id(void);
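
/*
 * The v1 wire format packs the message type, set, resource id, and lengths
 * into single 32-bit words; the helpers below read, write, and adjust those
 * bitfields, falling back to the plain struct members for the v0 format.
 */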
static inline uint32_t get_offset_value(uint32_t val, uint32_t offset,
		uint32_t size)
{
	return (((val) & GENMASK(offset + size - 1, offset))
		>> offset);
}

static inline void change_offset_value(uint32_t *val, uint32_t offset,
		uint32_t size, int32_t val1)
{
	uint32_t member = *val;
	uint32_t offset_val = get_offset_value(member, offset, size);
	uint32_t mask = (1 << size) - 1;

	offset_val += val1;
	*val &= CLEAR_FIELD(offset, size);
	*val |= ((offset_val & mask) << offset);
}

static inline void set_offset_value(uint32_t *val, uint32_t offset,
		uint32_t size, uint32_t val1)
{
	uint32_t mask = (1 << size) - 1;

	*val &= CLEAR_FIELD(offset, size);
	*val |= ((val1 & mask) << offset);
}

static uint32_t get_msg_id(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->msg_id;

	return ((struct rpm_message_header_v1 *)buf)->msg_id;
}

static uint32_t get_ack_msg_id(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct msm_rpm_ack_msg_v0 *)buf)->id_ack;

	return ((struct msm_rpm_ack_msg_v1 *)buf)->id_ack;
}

static uint32_t get_rsc_type(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->resource_type;

	return ((struct rpm_message_header_v1 *)buf)->resource_type;
}

static uint32_t get_set_type(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->set;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_SET_TYPE_OFFSET,
			RPM_SET_TYPE_SIZE);
}

static uint32_t get_data_len(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->data_len;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_DATA_LEN_OFFSET,
			RPM_DATA_LEN_SIZE);
}

static uint32_t get_rsc_id(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->resource_id;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			request_details, RPM_RSC_ID_OFFSET,
			RPM_RSC_ID_SIZE);
}

static uint32_t get_ack_req_len(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct msm_rpm_ack_msg_v0 *)buf)->req_len;

	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
			request_hdr, RPM_REQ_LEN_OFFSET,
			RPM_REQ_LEN_SIZE);
}

static uint32_t get_ack_msg_type(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct msm_rpm_ack_msg_v0 *)buf)->req;

	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
			request_hdr, RPM_MSG_TYPE_OFFSET,
			RPM_MSG_TYPE_SIZE);
}

static uint32_t get_req_len(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return ((struct rpm_message_header_v0 *)buf)->hdr.request_len;

	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
			RPM_REQ_LEN_SIZE);
}

static void set_msg_ver(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver) {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
				RPM_MSG_VERSION_SIZE, val);
	} else {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
				RPM_MSG_VERSION_SIZE, 0);
	}
}

static void set_req_len(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->hdr.request_len = val;
	else {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				hdr.request_hdr, RPM_REQ_LEN_OFFSET,
				RPM_REQ_LEN_SIZE, val);
	}
}

static void change_req_len(char *buf, int32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->hdr.request_len += val;
	else {
		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
				hdr.request_hdr, RPM_REQ_LEN_OFFSET,
				RPM_REQ_LEN_SIZE, val);
	}
}

static void set_msg_type(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->hdr.service_type =
			msm_rpm_request_service_v1[val];
	else {
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				hdr.request_hdr, RPM_MSG_TYPE_OFFSET,
				RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
	}
}

static void set_rsc_id(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->resource_id = val;
	else
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				request_details, RPM_RSC_ID_OFFSET,
				RPM_RSC_ID_SIZE, val);
}

static void set_data_len(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->data_len = val;
	else
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				request_details, RPM_DATA_LEN_OFFSET,
				RPM_DATA_LEN_SIZE, val);
}

static void change_data_len(char *buf, int32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->data_len += val;
	else
		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
				request_details, RPM_DATA_LEN_OFFSET,
				RPM_DATA_LEN_SIZE, val);
}

static void set_set_type(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->set = val;
	else
		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
				request_details, RPM_SET_TYPE_OFFSET,
				RPM_SET_TYPE_SIZE, val);
}

static void set_msg_id(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->msg_id = val;
	else
		((struct rpm_message_header_v1 *)buf)->msg_id = val;
}

static void set_rsc_type(char *buf, uint32_t val)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		((struct rpm_message_header_v0 *)buf)->resource_type = val;
	else
		((struct rpm_message_header_v1 *)buf)->resource_type = val;
}

static inline int get_buf_len(char *buf)
{
	return get_req_len(buf) + RPM_HDR_SIZE;
}

static inline struct kvp *get_first_kvp(char *buf)
{
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		return (struct kvp *)(buf +
				sizeof(struct rpm_message_header_v0));
	else
		return (struct kvp *)(buf +
				sizeof(struct rpm_message_header_v1));
}

static inline struct kvp *get_next_kvp(struct kvp *k)
{
	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
}

static inline void *get_data(struct kvp *k)
{
	return (void *)k + sizeof(*k);
}

static void delete_kvp(char *buf, struct kvp *d)
{
	struct kvp *n;
	int dec;
	uint32_t size;

	n = get_next_kvp(d);
	dec = (void *)n - (void *)d;
	size = get_data_len(buf) -
		((void *)n - (void *)get_first_kvp(buf));

	memcpy((void *)d, (void *)n, size);

	change_data_len(buf, -dec);
	change_req_len(buf, -dec);
}

static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
{
	memcpy(get_data(dest), get_data(src), src->s);
}

static void add_kvp(char *buf, struct kvp *n)
{
	int32_t inc = sizeof(*n) + n->s;

	if (get_req_len(buf) + inc > MAX_SLEEP_BUFFER) {
		WARN_ON(get_req_len(buf) + inc > MAX_SLEEP_BUFFER);
		return;
	}

	memcpy(buf + get_buf_len(buf), n, inc);

	change_data_len(buf, inc);
	change_req_len(buf, inc);
}
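
/*
 * Cached sleep-set requests live in an rb-tree keyed by (resource type,
 * resource id); tr_search() finds the cached entry matching a request.
 */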
static struct slp_buf *tr_search(struct rb_root *root, char *slp)
{
	unsigned int type = get_rsc_type(slp);
	unsigned int id = get_rsc_id(slp);
	struct rb_node *node = root->rb_node;

	while (node) {
		struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(cur->buf);
		unsigned int cid = get_rsc_id(cur->buf);

		if (type < ctype)
			node = node->rb_left;
		else if (type > ctype)
			node = node->rb_right;
		else if (id < cid)
			node = node->rb_left;
		else if (id > cid)
			node = node->rb_right;
		else
			return cur;
	}

	return NULL;
}

static int tr_insert(struct rb_root *root, struct slp_buf *slp)
{
	unsigned int type = get_rsc_type(slp->buf);
	unsigned int id = get_rsc_id(slp->buf);
	struct rb_node **node = &(root->rb_node), *parent = NULL;

	while (*node) {
		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(curr->buf);
		unsigned int cid = get_rsc_id(curr->buf);

		parent = *node;

		if (type < ctype)
			node = &((*node)->rb_left);
		else if (type > ctype)
			node = &((*node)->rb_right);
		else if (id < cid)
			node = &((*node)->rb_left);
		else if (id > cid)
			node = &((*node)->rb_right);
		else
			return -EINVAL;
	}

	rb_link_node(&slp->node, parent, node);
	rb_insert_color(&slp->node, root);
	slp->valid = true;
	return 0;
}
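
/*
 * Merge the KVPs of a new request into the cached sleep-set buffer: update
 * a value in place when the size matches, replace the pair when it does
 * not, and append pairs that were not cached before. Any change marks the
 * entry dirty (valid) so it is flushed on the next sleep entry.
 */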
static void tr_update(struct slp_buf *s, char *buf)
{
	struct kvp *e, *n;

	for_each_kvp(buf, n) {
		bool found = false;

		for_each_kvp(s->buf, e) {
			if (n->k == e->k) {
				found = true;
				if (n->s == e->s) {
					void *e_data = get_data(e);
					void *n_data = get_data(n);

					if (memcmp(e_data, n_data, n->s)) {
						update_kvp_data(e, n);
						s->valid = true;
					}
				} else {
					delete_kvp(s->buf, e);
					add_kvp(s->buf, n);
					s->valid = true;
				}
				break;
			}
		}
		if (!found) {
			add_kvp(s->buf, n);
			s->valid = true;
		}
	}
}

static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);

struct msm_rpm_request {
	uint8_t *client_buf;
	struct msm_rpm_kvp_data *kvp;
	uint32_t num_elements;
	uint32_t write_idx;
	uint8_t *buf;
	uint32_t numbytes;
};

/*
 * Data related to message acknowledgment
 */
LIST_HEAD(msm_rpm_wait_list);

struct msm_rpm_wait_data {
	struct list_head list;
	uint32_t msg_id;
	bool ack_recd;
	int errno;
	struct completion ack;
	bool delete_on_ack;
};

DEFINE_SPINLOCK(msm_rpm_list_lock);

LIST_HEAD(msm_rpm_ack_list);

static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
{
	return get_ack_msg_id(buf);
}
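
/*
 * Decode an RPM acknowledgment. A v1 ACK carries no payload, and a v1 NACK
 * carries the error code right after the ack header. For v0, any payload
 * after the ack header is an "err" record that is parsed to classify the
 * failure.
 */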
static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
{
	uint8_t *tmp;
	uint32_t req_len = get_ack_req_len(buf);
	uint32_t msg_type = get_ack_msg_type(buf);
	int rc = -ENODEV;
	uint32_t err;
	uint32_t ack_msg_size = rpm_msg_fmt_ver ?
			sizeof(struct msm_rpm_ack_msg_v1) :
			sizeof(struct msm_rpm_ack_msg_v0);

	if (rpm_msg_fmt_ver == RPM_MSG_V1_FMT &&
			msg_type == RPM_V1_ACK_SERVICE) {
		return 0;
	} else if (rpm_msg_fmt_ver && msg_type == RPM_V1_NACK_SERVICE) {
		err = *(uint32_t *)(buf + sizeof(struct msm_rpm_ack_msg_v1));
		return err;
	}

	req_len -= ack_msg_size;
	req_len += 2 * sizeof(uint32_t);
	if (!req_len)
		return 0;

	pr_err("%s:rpm returned error or nack req_len: %d id_ack: %d\n",
			__func__, req_len, get_ack_msg_id(buf));

	tmp = buf + ack_msg_size;

	if (memcmp(tmp, ERR, sizeof(uint32_t))) {
		pr_err("%s rpm returned error\n", __func__);
		WARN_ON(1);
	}

	tmp += 2 * sizeof(uint32_t);

	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
					sizeof(INV_RSC)) - 1))) {
		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
		rc = -EINVAL;
	} else {
		pr_err("%s(): RPM NACK Invalid header\n", __func__);
	}

	return rc;
}
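
/*
 * Sleep-set requests are not sent immediately: they are staged in the
 * rb-tree cache (tr_root) and flushed as a batch when the system is about
 * to enter RPM-assisted power collapse.
 */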
static int msm_rpm_smd_buffer_request(struct msm_rpm_request *cdata,
		uint32_t size, gfp_t flag)
{
	struct slp_buf *slp;
	static DEFINE_SPINLOCK(slp_buffer_lock);
	unsigned long flags;
	char *buf;

	buf = cdata->buf;

	if (size > MAX_SLEEP_BUFFER)
		return -ENOMEM;

	spin_lock_irqsave(&slp_buffer_lock, flags);
	slp = tr_search(&tr_root, buf);

	if (!slp) {
		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
		if (!slp) {
			spin_unlock_irqrestore(&slp_buffer_lock, flags);
			return -ENOMEM;
		}
		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
		memcpy(slp->buf, buf, size);
		if (tr_insert(&tr_root, slp)) {
			pr_err("Error updating sleep request\n");
			kfree(slp);
			spin_unlock_irqrestore(&slp_buffer_lock, flags);
			return -EINVAL;
		}
	} else {
		/* handle unsent requests */
		tr_update(slp, buf);
	}
	trace_rpm_smd_sleep_set(get_msg_id(cdata->client_buf),
			get_rsc_type(cdata->client_buf),
			get_req_len(cdata->client_buf));

	spin_unlock_irqrestore(&slp_buffer_lock, flags);

	return 0;
}

static struct msm_rpm_driver_data msm_rpm_data = {
	.smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
};
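
/*
 * Walk the sleep-set cache and send every dirty entry to the RPM over the
 * rpmsg channel. This runs on the way into power collapse with the receive
 * interrupt masked, so the sleep-set acks are intentionally not waited on.
 */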
static int msm_rpm_flush_requests(void)
{
	struct rb_node *t;
	int ret;
	int count = 0;

	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
		struct slp_buf *s = rb_entry(t, struct slp_buf, node);
		unsigned int type = get_rsc_type(s->buf);
		unsigned int id = get_rsc_id(s->buf);

		if (!s->valid)
			continue;

		set_msg_id(s->buf, msm_rpm_get_next_msg_id());

		ret = rpmsg_send(rpm->rpm_channel, s->buf, get_buf_len(s->buf));
		WARN_ON(ret != 0);
		trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id);

		s->valid = false;
		count++;

		/*
		 * RPM acks would need to be handled here if we had sent more
		 * than 24 messages, so that we do not overrun the SMD buffer.
		 * Since we expect only sleep sets at this point (RPM power
		 * collapse would be disallowed if we had pending active
		 * requests), we need not process these sleep-set acks.
		 */
		if (count >= MAX_WAIT_ON_ACK) {
			pr_err("Error: more than %d requests are buffered\n",
					MAX_WAIT_ON_ACK);
			return -ENOSPC;
		}
	}
	return 0;
}

static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	uint32_t i;
	uint32_t data_size, msg_size;

	if (probe_status)
		return probe_status;

	if (!handle || !data) {
		pr_err("%s(): Invalid handle/data\n", __func__);
		return -EINVAL;
	}

	if (size < 0)
		return -EINVAL;

	data_size = ALIGN(size, SZ_4);
	msg_size = data_size + 8;

	for (i = 0; i < handle->write_idx; i++) {
		if (handle->kvp[i].key != key)
			continue;
		if (handle->kvp[i].nbytes != data_size) {
			kfree(handle->kvp[i].value);
			handle->kvp[i].value = NULL;
		} else {
			if (!memcmp(handle->kvp[i].value, data, data_size))
				return 0;
		}
		break;
	}

	if (i >= handle->num_elements) {
		pr_err("Number of resources exceeds max allocated\n");
		return -ENOMEM;
	}

	if (i == handle->write_idx)
		handle->write_idx++;

	if (!handle->kvp[i].value) {
		handle->kvp[i].value = kzalloc(data_size, GFP_NOIO);
		if (!handle->kvp[i].value)
			return -ENOMEM;
	} else {
		/*
		 * We reach the else case if the key already exists but the
		 * data doesn't match, in which case we zero out the old
		 * data before copying in the new value.
		 */
		memset(handle->kvp[i].value, 0, data_size);
	}

	if (!handle->kvp[i].valid)
		change_data_len(handle->client_buf, msg_size);
	else
		change_data_len(handle->client_buf,
			(data_size - handle->kvp[i].nbytes));

	handle->kvp[i].nbytes = data_size;
	handle->kvp[i].key = key;
	memcpy(handle->kvp[i].value, data, size);
	handle->kvp[i].valid = true;

	return 0;
}
static struct msm_rpm_request *msm_rpm_create_request_common(
		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
		int num_elements)
{
	struct msm_rpm_request *cdata;
	uint32_t buf_size;

	if (probe_status)
		return ERR_PTR(probe_status);

	cdata = kzalloc(sizeof(struct msm_rpm_request), GFP_NOIO);
	if (!cdata)
		goto cdata_alloc_fail;

	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		buf_size = sizeof(struct rpm_message_header_v0);
	else
		buf_size = sizeof(struct rpm_message_header_v1);

	cdata->client_buf = kzalloc(buf_size, GFP_NOIO);
	if (!cdata->client_buf)
		goto client_buf_alloc_fail;

	set_set_type(cdata->client_buf, set);
	set_rsc_type(cdata->client_buf, rsc_type);
	set_rsc_id(cdata->client_buf, rsc_id);

	cdata->num_elements = num_elements;
	cdata->write_idx = 0;

	cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
		GFP_NOIO);
	if (!cdata->kvp) {
		pr_warn("%s(): Cannot allocate memory for key value data\n",
				__func__);
		goto kvp_alloc_fail;
	}

	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_NOIO);
	if (!cdata->buf)
		goto buf_alloc_fail;

	cdata->numbytes = DEFAULT_BUFFER_SIZE;
	return cdata;

buf_alloc_fail:
	kfree(cdata->kvp);
kvp_alloc_fail:
	kfree(cdata->client_buf);
client_buf_alloc_fail:
	kfree(cdata);
cdata_alloc_fail:
	return NULL;
}

void msm_rpm_free_request(struct msm_rpm_request *handle)
{
	int i;

	if (!handle)
		return;
	for (i = 0; i < handle->num_elements; i++)
		kfree(handle->kvp[i].value);
	kfree(handle->kvp);
	kfree(handle->client_buf);
	kfree(handle->buf);
	kfree(handle);
}
EXPORT_SYMBOL(msm_rpm_free_request);

struct msm_rpm_request *msm_rpm_create_request(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements);
}
EXPORT_SYMBOL(msm_rpm_create_request);

int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data);

int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);

bool msm_rpm_waiting_for_ack(void)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	ret = list_empty(&msm_rpm_wait_list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);

	return !ret;
}
static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
{
	struct list_head *ptr;
	struct msm_rpm_wait_data *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);

	list_for_each(ptr, &msm_rpm_wait_list) {
		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
		if (elem && (elem->msg_id == msg_id))
			break;
		elem = NULL;
	}
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	return elem;
}

static uint32_t msm_rpm_get_next_msg_id(void)
{
	uint32_t id;

	/*
	 * A message id of 0 is used by the driver to indicate an error
	 * condition. The RPM driver uses an id of 1 to indicate unsent data
	 * when the data sent over hasn't been modified. This isn't an error
	 * scenario, and wait-for-ack returns success when the message id is 1.
	 */
	do {
		id = atomic_inc_return(&msm_rpm_msg_id);
	} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));

	return id;
}
static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
{
	unsigned long flags;
	struct msm_rpm_wait_data *data =
		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);

	if (!data)
		return -ENOMEM;

	init_completion(&data->ack);
	data->ack_recd = false;
	data->msg_id = msg_id;
	data->errno = INIT_ERROR;
	data->delete_on_ack = delete_on_ack;
	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	if (delete_on_ack)
		list_add_tail(&data->list, &msm_rpm_wait_list);
	else
		list_add(&data->list, &msm_rpm_wait_list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);

	return 0;
}

static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
{
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_del(&elem->list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	kfree(elem);
}

static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
	struct list_head *ptr, *next;
	struct msm_rpm_wait_data *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
		if (elem->msg_id == msg_id) {
			elem->errno = errno;
			elem->ack_recd = true;
			complete(&elem->ack);
			if (elem->delete_on_ack) {
				list_del(&elem->list);
				kfree(elem);
			}
			break;
		}
		elem = NULL;
	}
	/*
	 * Special case for the sleep driver, which doesn't wait for ACKs.
	 * Not waiting reduces the latency of entering RPM-assisted power
	 * collapse.
	 */
	if (!elem)
		trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
}
struct msm_rpm_kvp_packet {
	uint32_t id;
	uint32_t len;
	uint32_t val;
};

static void msm_rpm_log_request(struct msm_rpm_request *cdata)
{
	char buf[DEBUG_PRINT_BUFFER_SIZE];
	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
	char name[5];
	u32 value;
	uint32_t i;
	int j, prev_valid;
	int valid_count = 0;
	int pos = 0;
	uint32_t res_type, rsc_id;

	name[4] = 0;

	for (i = 0; i < cdata->write_idx; i++)
		if (cdata->kvp[i].valid)
			valid_count++;

	pos += scnprintf(buf + pos, buflen - pos, "RPM req: ");
	if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
		pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
				get_msg_id(cdata->client_buf));
	pos += scnprintf(buf + pos, buflen - pos, "s=%s",
		(get_set_type(cdata->client_buf) ==
				MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));

	res_type = get_rsc_type(cdata->client_buf);
	rsc_id = get_rsc_id(cdata->client_buf);
	if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
	    && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
		/* Both pretty and raw formatting */
		memcpy(name, &res_type, sizeof(uint32_t));
		pos += scnprintf(buf + pos, buflen - pos,
			", rsc_type=0x%08X (%s), rsc_id=%u; ",
			res_type, name, rsc_id);

		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
			if (!cdata->kvp[i].valid)
				continue;

			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
			pos += scnprintf(buf + pos, buflen - pos,
					"[key=0x%08X (%s), value=%s",
					cdata->kvp[i].key, name,
					(cdata->kvp[i].nbytes ? "0x" : "null"));

			for (j = 0; j < cdata->kvp[i].nbytes; j++)
				pos += scnprintf(buf + pos, buflen - pos,
						"%02X ",
						cdata->kvp[i].value[j]);

			if (cdata->kvp[i].nbytes)
				pos += scnprintf(buf + pos, buflen - pos, "(");

			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
				value = 0;
				memcpy(&value, &cdata->kvp[i].value[j],
					min_t(uint32_t, sizeof(uint32_t),
						cdata->kvp[i].nbytes - j));
				pos += scnprintf(buf + pos, buflen - pos, "%u",
						value);
				if (j + 4 < cdata->kvp[i].nbytes)
					pos += scnprintf(buf + pos,
						buflen - pos, " ");
			}
			if (cdata->kvp[i].nbytes)
				pos += scnprintf(buf + pos, buflen - pos, ")");
			pos += scnprintf(buf + pos, buflen - pos, "]");
			if (prev_valid + 1 < valid_count)
				pos += scnprintf(buf + pos, buflen - pos, ", ");
			prev_valid++;
		}
	} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
		/* Pretty formatting only */
		memcpy(name, &res_type, sizeof(uint32_t));
		pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
			rsc_id);

		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
			if (!cdata->kvp[i].valid)
				continue;

			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
			pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
				name, (cdata->kvp[i].nbytes ? "" : "null"));

			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
				value = 0;
				memcpy(&value, &cdata->kvp[i].value[j],
					min_t(uint32_t, sizeof(uint32_t),
						cdata->kvp[i].nbytes - j));
				pos += scnprintf(buf + pos, buflen - pos, "%u",
						value);
				if (j + 4 < cdata->kvp[i].nbytes)
					pos += scnprintf(buf + pos,
						buflen - pos, " ");
			}
			if (prev_valid + 1 < valid_count)
				pos += scnprintf(buf + pos, buflen - pos, ", ");
			prev_valid++;
		}
	} else {
		/* Raw formatting only */
		pos += scnprintf(buf + pos, buflen - pos,
			", rsc_type=0x%08X, rsc_id=%u; ", res_type, rsc_id);

		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
			if (!cdata->kvp[i].valid)
				continue;

			pos += scnprintf(buf + pos, buflen - pos,
					"[key=0x%08X, value=%s",
					cdata->kvp[i].key,
					(cdata->kvp[i].nbytes ? "0x" : "null"));
			for (j = 0; j < cdata->kvp[i].nbytes; j++) {
				pos += scnprintf(buf + pos, buflen - pos,
						"%02X",
						cdata->kvp[i].value[j]);
				if (j + 1 < cdata->kvp[i].nbytes)
					pos += scnprintf(buf + pos,
						buflen - pos, " ");
			}
			pos += scnprintf(buf + pos, buflen - pos, "]");
			if (prev_valid + 1 < valid_count)
				pos += scnprintf(buf + pos, buflen - pos, ", ");
			prev_valid++;
		}
	}

	pos += scnprintf(buf + pos, buflen - pos, "\n");
	pr_info("request info %s\n", buf);
}
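
/*
 * Marshal the request into the transmit buffer (header followed by the
 * valid KVPs) and hand it to the rpmsg channel. Sleep-set requests are
 * diverted into the sleep buffer cache instead of being sent. Returns the
 * message id on success, 1 for buffered/unmodified data, and 0 on error.
 */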
static int msm_rpm_send_data(struct msm_rpm_request *cdata,
		int msg_type, bool noack)
{
	uint8_t *tmpbuff;
	int ret;
	uint32_t i;
	uint32_t msg_size;
	int msg_hdr_sz, req_hdr_sz;
	uint32_t data_len = get_data_len(cdata->client_buf);
	uint32_t set = get_set_type(cdata->client_buf);
	uint32_t msg_id;

	if (probe_status) {
		pr_err("probe failed\n");
		return probe_status;
	}
	if (!data_len) {
		pr_err("no data len\n");
		return 1;
	}

	msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) :
			sizeof(struct rpm_message_header_v0);

	req_hdr_sz = RPM_HDR_SIZE;
	set_msg_type(cdata->client_buf, msg_type);

	set_req_len(cdata->client_buf, data_len + msg_hdr_sz - req_hdr_sz);
	msg_size = get_req_len(cdata->client_buf) + req_hdr_sz;

	/* populate data_len */
	if (msg_size > cdata->numbytes) {
		kfree(cdata->buf);
		cdata->numbytes = msg_size;
		cdata->buf = kzalloc(msg_size, GFP_NOIO);
	}

	if (!cdata->buf) {
		pr_err("Failed to allocate memory\n");
		return 0;
	}

	tmpbuff = cdata->buf;
	tmpbuff += msg_hdr_sz;
	for (i = 0; (i < cdata->write_idx); i++) {
		/* Sanity check */
		WARN_ON((tmpbuff - cdata->buf) > cdata->numbytes);

		if (!cdata->kvp[i].valid)
			continue;

		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
		tmpbuff += sizeof(uint32_t);

		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
		tmpbuff += sizeof(uint32_t);

		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
		tmpbuff += cdata->kvp[i].nbytes;
	}
	memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz);

	if ((set == MSM_RPM_CTX_SLEEP_SET) &&
	    !msm_rpm_smd_buffer_request(cdata, msg_size, GFP_NOIO)) {
		return 1;
	}

	msg_id = msm_rpm_get_next_msg_id();

	/* Set the version bit for the new protocol */
	set_msg_ver(cdata->buf, rpm_msg_fmt_ver);

	set_msg_id(cdata->buf, msg_id);
	set_msg_id(cdata->client_buf, msg_id);

	if (msm_rpm_debug_mask
	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
		msm_rpm_log_request(cdata);

	if (standalone) {
		for (i = 0; (i < cdata->write_idx); i++)
			cdata->kvp[i].valid = false;

		set_data_len(cdata->client_buf, 0);
		ret = msg_id;
		return ret;
	}

	msm_rpm_add_wait_list(msg_id, noack);

	ret = rpmsg_send(rpm->rpm_channel, &cdata->buf[0], msg_size);

	if (!ret) {
		for (i = 0; (i < cdata->write_idx); i++)
			cdata->kvp[i].valid = false;
		set_data_len(cdata->client_buf, 0);
		ret = msg_id;
		trace_rpm_smd_send_active_set(msg_id,
			get_rsc_type(cdata->client_buf),
			get_rsc_id(cdata->client_buf));
	} else if (ret < 0) {
		struct msm_rpm_wait_data *rc;

		pr_err("Failed to write data msg_size:%d ret:%d msg_id:%d\n",
				msg_size, ret, msg_id);
		ret = 0;
		rc = msm_rpm_get_entry_from_msg_id(msg_id);
		if (rc)
			msm_rpm_free_list_entry(rc);
	}
	return ret;
}
static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack)
{
	int ret;
	static DEFINE_MUTEX(send_mtx);

	mutex_lock(&send_mtx);
	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, noack);
	mutex_unlock(&send_mtx);

	return ret;
}

int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
{
	return _msm_rpm_send_request(handle, false);
}
EXPORT_SYMBOL(msm_rpm_send_request_noirq);

int msm_rpm_send_request(struct msm_rpm_request *handle)
{
	return _msm_rpm_send_request(handle, false);
}
EXPORT_SYMBOL(msm_rpm_send_request);

void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
{
	int ret;

	ret = _msm_rpm_send_request(handle, true);

	return ret < 0 ? ERR_PTR(ret) : NULL;
}
EXPORT_SYMBOL(msm_rpm_send_request_noack);

int msm_rpm_wait_for_ack(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	int rc = 0;

	if (!msg_id) {
		pr_err("Invalid msg id\n");
		return -ENOMEM;
	}

	if (msg_id == 1)
		return rc;

	if (standalone)
		return rc;

	elem = msm_rpm_get_entry_from_msg_id(msg_id);
	if (!elem)
		return rc;

	wait_for_completion(&elem->ack);
	trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADFEED);

	rc = elem->errno;

	msm_rpm_free_list_entry(elem);

	return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack);

int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
{
	return msm_rpm_wait_for_ack(msg_id);
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);

void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
	int i, rc;
	struct msm_rpm_request *req =
		msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems);

	if (IS_ERR(req))
		return req;

	if (!req)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nelems; i++) {
		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
				kvp[i].data, kvp[i].length);
		if (rc)
			goto bail;
	}

	rc = PTR_ERR(msm_rpm_send_request_noack(req));
bail:
	msm_rpm_free_request(req);
	return rc < 0 ? ERR_PTR(rc) : NULL;
}
EXPORT_SYMBOL(msm_rpm_send_message_noack);

int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
	int i, rc;
	struct msm_rpm_request *req =
		msm_rpm_create_request(set, rsc_type, rsc_id, nelems);

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!req)
		return -ENOMEM;

	for (i = 0; i < nelems; i++) {
		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
				kvp[i].data, kvp[i].length);
		if (rc)
			goto bail;
	}

	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
bail:
	msm_rpm_free_request(req);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message);

int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
	return msm_rpm_send_message(set, rsc_type, rsc_id, kvp, nelems);
}
EXPORT_SYMBOL(msm_rpm_send_message_noirq);
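
/*
 * Mask or unmask the SMD/rpmsg receive interrupt, optionally pinning its
 * affinity to the given CPU mask while masked.
 */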
static int smd_mask_receive_interrupt(bool mask,
		const struct cpumask *cpumask)
{
	struct irq_chip *irq_chip;
	struct irq_data *irq_data;

	irq_data = irq_get_irq_data(rpm->irq);
	if (!irq_data)
		return -ENODEV;

	irq_chip = irq_data->chip;
	if (!irq_chip)
		return -ENODEV;

	if (mask) {
		irq_chip->irq_mask(irq_data);
		if (cpumask && irq_chip->irq_set_affinity)
			irq_chip->irq_set_affinity(irq_data, cpumask, true);
	} else {
		irq_chip->irq_unmask(irq_data);
	}

	return 0;
}

/**
 * During power collapse, the rpm driver disables the SMD interrupts to make
 * sure that the interrupt doesn't wake us from sleep.
 */
static int msm_rpm_enter_sleep(void)
{
	int ret = 0;
	struct cpumask cpumask;
	unsigned int cpu = 0;

	if (standalone)
		return 0;

	if (probe_status)
		return 0;

	cpumask_copy(&cpumask, cpumask_of(cpu));
	ret = smd_mask_receive_interrupt(true, &cpumask);
	if (!ret) {
		ret = msm_rpm_flush_requests();
		if (ret)
			smd_mask_receive_interrupt(false, NULL);
	}

	return msm_mpm_enter_sleep(&cpumask);
}

/**
 * When the system resumes from power collapse, the SMD interrupt disabled
 * by the enter function has to be re-enabled to continue processing SMD
 * messages.
 */
static void msm_rpm_exit_sleep(void)
{
	if (standalone)
		return;

	if (probe_status)
		return;

	smd_mask_receive_interrupt(false, NULL);
}
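
/*
 * genpd power notifier: veto power-domain off while active-set acks are
 * still pending, flush the cached sleep set on the way down, and restore
 * the receive interrupt on the way back up.
 */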
static int rpm_smd_power_cb(struct notifier_block *nb, unsigned long action, void *d)
{
	switch (action) {
	case GENPD_NOTIFY_OFF:
		if (msm_rpm_waiting_for_ack())
			return NOTIFY_BAD;
		if (msm_rpm_enter_sleep())
			return NOTIFY_BAD;
		break;
	case GENPD_NOTIFY_ON:
		msm_rpm_exit_sleep();
		break;
	}

	return NOTIFY_OK;
}

static int rpm_smd_pm_notifier(struct notifier_block *nb, unsigned long event, void *unused)
{
	int ret;

	if (event == PM_SUSPEND_PREPARE) {
		ret = msm_rpm_flush_requests();
		pr_debug("ret = %d\n", ret);
	}

	/* continue to suspend */
	return NOTIFY_OK;
}

static struct notifier_block rpm_smd_pm_nb = {
	.notifier_call = rpm_smd_pm_notifier,
};
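
/*
 * rpmsg receive callback: copy the ack into a local buffer, decode the
 * message id and error code, and complete the matching waiter if one
 * exists.
 */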
static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, void *ptr,
		int size, void *priv, u32 addr)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};
	struct msm_rpm_wait_data *elem;
	static DEFINE_SPINLOCK(rx_notify_lock);
	unsigned long flags;

	if (!size)
		return -EINVAL;

	WARN_ON(size > MAX_ERR_BUFFER_SIZE);

	spin_lock_irqsave(&rx_notify_lock, flags);
	memcpy(buf, ptr, size);
	msg_id = msm_rpm_get_msg_id_from_ack(buf);
	errno = msm_rpm_get_error_from_ack(buf);
	elem = msm_rpm_get_entry_from_msg_id(msg_id);

	/*
	 * This is applicable to sleep-set requests, which are not added to
	 * the wait-queue list. Without this check we would run into a NULL
	 * pointer dereference.
	 */
	if (!elem) {
		spin_unlock_irqrestore(&rx_notify_lock, flags);
		return 0;
	}

	msm_rpm_process_ack(msg_id, errno);
	spin_unlock_irqrestore(&rx_notify_lock, flags);

	return 0;
}
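
/*
 * Bind to the "rpm_requests" rpmsg channel: detect standalone mode, read
 * the protocol version register to pick the v0 or v1 message format, grab
 * the receive interrupt, and hook the PM and genpd notifiers.
 */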
static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
{
	char *key = NULL;
	struct device_node *p;
	struct platform_device *rpm_device;
	int ret = 0;
	int irq;
	void __iomem *reg_base;
	uint64_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */

	p = of_find_compatible_node(NULL, NULL, "qcom,rpm-smd");
	if (!p) {
		pr_err("Unable to find rpm-smd\n");
		probe_status = -ENODEV;
		goto fail;
	}

	rpm_device = of_find_device_by_node(p);
	if (!rpm_device) {
		probe_status = -ENODEV;
		pr_err("Unable to get rpm device structure\n");
		goto fail;
	}

	key = "rpm-standalone";
	standalone = of_property_read_bool(p, key);
	if (standalone) {
		probe_status = ret;
		pr_info("RPM running in standalone mode\n");
		return ret;
	}

	reg_base = of_iomap(p, 0);
	if (reg_base) {
		version = readq_relaxed(reg_base);
		iounmap(reg_base);
	}

	if (version == V1_PROTOCOL_VERSION)
		rpm_msg_fmt_ver = RPM_MSG_V1_FMT;

	pr_info("RPM-SMD running version %d\n", rpm_msg_fmt_ver);

	irq = of_irq_get(p, 0);
	if (irq <= 0) {
		pr_err("Unable to get rpm-smd interrupt number\n");
		probe_status = -ENODEV;
		goto fail;
	}

	rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
	if (!rpm) {
		probe_status = -ENOMEM;
		goto fail;
	}

	ret = register_pm_notifier(&rpm_smd_pm_nb);
	if (ret) {
		pr_err("%s: power state notif error %d\n", __func__, ret);
		probe_status = -ENODEV;
		goto fail;
	}

	rpm->dev = &rpdev->dev;
	rpm->rpm_channel = rpdev->ept;
	dev_set_drvdata(&rpdev->dev, rpm);
	priv_rpm = *rpm;
	rpm->irq = irq;

	if (of_find_property(p, "power-domains", NULL)) {
		pm_runtime_enable(&rpm_device->dev);
		rpm->genpd_nb.notifier_call = rpm_smd_power_cb;
		ret = dev_pm_genpd_add_notifier(&rpm_device->dev, &rpm->genpd_nb);
		if (ret) {
			pm_runtime_disable(&rpm_device->dev);
			probe_status = ret;
			goto fail;
		}
	}

	mutex_init(&rpm->lock);
	init_completion(&rpm->ack);
	probe_status = 0;
fail:
	return probe_status;
}

static struct rpmsg_device_id rpmsg_driver_rpm_id_table[] = {
	{ .name = "rpm_requests" },
	{ },
};

static struct rpmsg_driver qcom_smd_rpm_driver = {
	.probe = qcom_smd_rpm_probe,
	.callback = qcom_smd_rpm_callback,
	.id_table = rpmsg_driver_rpm_id_table,
	.drv = {
		.name = "qcom_rpm_smd",
		.owner = THIS_MODULE,
	},
};

static int rpm_driver_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *p = pdev->dev.of_node;

	ret = of_platform_populate(p, NULL, NULL, &pdev->dev);
	if (ret)
		return ret;

	ret = register_rpmsg_driver(&qcom_smd_rpm_driver);
	if (ret) {
		of_platform_depopulate(&pdev->dev);
		pr_err("register_rpmsg_driver: failed with err %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct of_device_id rpm_of_match[] = {
	{ .compatible = "qcom,rpm-smd" },
	{},
};

struct platform_driver rpm_driver = {
	.probe = rpm_driver_probe,
	.driver = {
		.name = "rpm-smd",
		.of_match_table = rpm_of_match,
		.suppress_bind_attrs = true,
	},
};

int __init msm_rpm_driver_init(void)
{
	return platform_driver_register(&rpm_driver);
}

#ifdef MODULE
module_init(msm_rpm_driver_init);
#else
postcore_initcall_sync(msm_rpm_driver_init);
#endif

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM-SMD Driver");
MODULE_LICENSE("GPL");