qcom_glink_native.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016-2017, Linaro Ltd
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  6. */
  7. #include <linux/idr.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/io.h>
  10. #include <linux/list.h>
  11. #include <linux/mfd/syscon.h>
  12. #include <linux/module.h>
  13. #include <linux/of.h>
  14. #include <linux/of_address.h>
  15. #include <linux/of_irq.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/regmap.h>
  18. #include <linux/rpmsg.h>
  19. #include <linux/sizes.h>
  20. #include <linux/slab.h>
  21. #include <linux/workqueue.h>
  22. #include <linux/kthread.h>
  23. #include <linux/mailbox_client.h>
  24. #include <linux/suspend.h>
  25. #include <linux/termios.h>
  26. #include <linux/ipc_logging.h>
  27. #include "rpmsg_internal.h"
  28. #include "qcom_glink_native.h"
/* Number of pages for the ipc_log_context backing GLINK_INFO/CH_INFO */
#define GLINK_LOG_PAGE_CNT 32

/* Edge-level trace: goes to the edge's ipc log only */
#define GLINK_INFO(ctxt, x, ...) \
	ipc_log_string(ctxt, "[%s]: "x, __func__, ##__VA_ARGS__)

/* Channel-level trace: prefixes the channel name and local/remote cids */
#define CH_INFO(ch, x, ...) \
do { \
	if (ch->glink) \
		ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \
			ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \
} while (0)

/* Channel-level error: ipc log plus rate-limited dmesg */
#define CH_ERR(ch, x, ...) \
do { \
	if (ch->glink) { \
		ipc_log_string(ch->glink->ilc, "%s[%d:%d] %s: "x, ch->name, \
			ch->lcid, ch->rcid, __func__, ##__VA_ARGS__); \
		dev_err_ratelimited(ch->glink->dev, "[%s]: "x, __func__, ##__VA_ARGS__); \
	} \
} while (0)

/*
 * NOTE(review): the BUG()-ing variant is force-enabled via "#if 1",
 * bypassing the CONFIG_RPMSG_QCOM_GLINK_DEBUG gate — confirm this
 * debug override is intentional before shipping.
 */
#if 1//IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_DEBUG)
#define GLINK_BUG(ctxt, x, ...) \
do { \
	ipc_log_string(ctxt, "[%s]: ASSERT at line %d: "x, \
		__func__, __LINE__, ##__VA_ARGS__); \
	pr_err("[%s]: ASSERT at line %d: "x, \
		__func__, __LINE__, ##__VA_ARGS__); \
	BUG(); \
} while (0)
#else
#define GLINK_BUG(ctxt, x, ...) \
do { \
	ipc_log_string(ctxt, "[%s]: WARN at line %d: "x, \
		__func__, __LINE__, ##__VA_ARGS__); \
	pr_err("[%s]: WARN at line %d: "x, \
		__func__, __LINE__, ##__VA_ARGS__); \
} while (0)
#endif

/* Maximum length of a channel name, including the NUL terminator */
#define GLINK_NAME_SIZE 32
#define GLINK_VERSION_1 1

/* Valid local channel-id range handed out by idr_alloc_cyclic() */
#define RPM_GLINK_CID_MIN 1
#define RPM_GLINK_CID_MAX 65536

/* Suspend/resume bookkeeping — presumably set from PM callbacks elsewhere
 * in this file; TODO confirm against the rest of the driver. */
static int should_wake;
static int glink_resume_pkt;
/**
 * struct glink_msg - G-Link control message header as carried in the FIFO
 * @cmd: RPM_CMD_* command identifier (little-endian on the wire)
 * @param1: first command argument, typically a channel id
 * @param2: second command argument, command-specific
 * @data: optional trailing command payload
 */
struct glink_msg {
	__le16 cmd;
	__le16 param1;
	__le32 param2;
	u8 data[];
} __packed;

/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;
	struct glink_msg msg;
	u8 data[];
};
/**
 * struct glink_core_rx_intent - RX intent
 * RX intent
 *
 * @data: pointer to the data (may be NULL for zero-copy)
 * @id: remote or local intent ID
 * @size: size of the original intent (do not modify)
 * @reuse: To mark if the intent can be reused after first use
 * @in_use: To mark if intent is already in use for the channel
 * @advertised: presumably whether this local intent has been advertised to
 *              the remote side — TODO confirm against where it is set
 * @offset: next write offset (initially 0)
 * @node: list node
 */
struct glink_core_rx_intent {
	void *data;
	u32 id;
	size_t size;
	bool reuse;
	bool in_use;
	bool advertised;
	u32 offset;
	struct list_head node;
};
/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @name: edge name (used for logging/irq naming)
 * @mbox_client: mailbox client
 * @mbox_chan: mailbox channel
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @irq: IRQ for signaling incoming events
 * @irqname: buffer holding the name registered for @irq
 * @irq_lock: protects @irq_running — TODO confirm exact scope from handler
 * @irq_running: whether the irq handler is currently executing — confirm
 * @kworker: kworker to handle rx_done work
 * @task: kthread running @kworker
 * @cpu_mask: cpumask applied to @task, presumably for affinity tuning
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 * @in_reset: reset status of this edge
 * @features: remote features
 * @intentless: flag to indicate that there is no intent
 * @tx_avail_notify: Waitqueue for pending tx tasks
 * @sent_read_notify: flag to check cmd sent or not
 * @ilc: ipc logging context reference
 */
struct qcom_glink {
	struct device *dev;
	const char *name;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;
	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;
	int irq;
	char irqname[GLINK_NAME_SIZE];
	spinlock_t irq_lock;
	bool irq_running;
	struct kthread_worker kworker;
	struct task_struct *task;
	struct cpumask cpu_mask;
	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;
	spinlock_t tx_lock;
	spinlock_t idr_lock;
	struct idr lcids;
	struct idr rcids;
	atomic_t in_reset;
	unsigned long features;
	bool intentless;
	wait_queue_head_t tx_avail_notify;
	bool sent_read_notify;
	void *ilc;
};
/* Local channel life-cycle states */
enum {
	GLINK_STATE_CLOSED,
	GLINK_STATE_OPENING,
	GLINK_STATE_OPEN,
	GLINK_STATE_CLOSING,
};
/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @intent_lock: lock for protection of @liids, @riids
 * @liids: idr of all local intents
 * @riids: idr of all remote intents
 * @intent_work: worker responsible for transmitting rx_done packets
 * @done_intents: list of intents that needs to be announced rx_done
 * @defer_intents: list of intents held by the client released by rpmsg_rx_done
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 * @intent_req_lock: Synchronises multiple intent requests
 * @intent_req_result: Result of intent request
 * @channel_ready: channel readiness flag — TODO confirm where it is set
 * @intent_req_acked: Status of intent request acknowledgment
 * @intent_req_completed: Status of intent request completion
 * @intent_req_ack: Waitqueue for @intent_req_acked
 * @intent_req_comp: Waitqueue for @intent_req_completed
 * @intent_timeout_count: number of times intents have timed out consecutively
 * @local_signals: local side signals
 * @remote_signals: remote side signals
 * @signals_cb: client callback for notifying signal change
 */
struct glink_channel {
	struct rpmsg_endpoint ept;
	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;
	struct kref refcount;
	spinlock_t recv_lock;
	char *name;
	unsigned int lcid;
	unsigned int rcid;
	spinlock_t intent_lock;
	struct idr liids;
	struct idr riids;
	struct kthread_work intent_work;
	struct list_head done_intents;
	struct list_head defer_intents;
	struct glink_core_rx_intent *buf;
	int buf_offset;
	int buf_size;
	struct completion open_ack;
	struct completion open_req;
	struct mutex intent_req_lock;
	bool intent_req_result;
	bool channel_ready;
	atomic_t intent_req_acked;
	atomic_t intent_req_completed;
	wait_queue_head_t intent_req_ack;
	wait_queue_head_t intent_req_comp;
	int intent_timeout_count;
	unsigned int local_signals;
	unsigned int remote_signals;
	int (*signals_cb)(struct rpmsg_device *dev, void *priv, u32 old, u32 new);
};
/* Consecutive intent-request timeouts tolerated before giving up */
#define MAX_INTENT_TIMEOUTS 2

#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)

static const struct rpmsg_endpoint_ops glink_endpoint_ops;

/* G-Link protocol command identifiers (cmd field of struct glink_msg) */
#define RPM_CMD_VERSION			0
#define RPM_CMD_VERSION_ACK		1
#define RPM_CMD_OPEN			2
#define RPM_CMD_CLOSE			3
#define RPM_CMD_OPEN_ACK		4
#define RPM_CMD_INTENT			5
#define RPM_CMD_RX_DONE			6
#define RPM_CMD_RX_INTENT_REQ		7
#define RPM_CMD_RX_INTENT_REQ_ACK	8
#define RPM_CMD_TX_DATA			9
#define RPM_CMD_TX_DATA_ZERO_COPY	10
#define RPM_CMD_CLOSE_ACK		11
#define RPM_CMD_TX_DATA_CONT		12
#define RPM_CMD_READ_NOTIF		13
#define RPM_CMD_RX_DONE_W_REUSE		14
#define RPM_CMD_SIGNALS			15

/* Feature bit negotiated in RPM_CMD_VERSION exchange */
#define GLINK_FEATURE_INTENTLESS	BIT(1)

/* TIOCM-style modem signal bits carried by RPM_CMD_SIGNALS */
#define NATIVE_DTR_SIG			BIT(31)
#define NATIVE_CTS_SIG			BIT(30)
#define NATIVE_CD_SIG			BIT(29)
#define NATIVE_RI_SIG			BIT(28)

static void qcom_glink_rx_done_work(struct kthread_work *work);
  259. static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
  260. const char *name)
  261. {
  262. struct glink_channel *channel;
  263. channel = kzalloc(sizeof(*channel), GFP_KERNEL);
  264. if (!channel)
  265. return ERR_PTR(-ENOMEM);
  266. /* Setup glink internal glink_channel data */
  267. spin_lock_init(&channel->recv_lock);
  268. spin_lock_init(&channel->intent_lock);
  269. mutex_init(&channel->intent_req_lock);
  270. channel->glink = glink;
  271. channel->name = kstrdup(name, GFP_KERNEL);
  272. if (!channel->name) {
  273. kfree(channel);
  274. return ERR_PTR(-ENOMEM);
  275. }
  276. init_completion(&channel->open_req);
  277. init_completion(&channel->open_ack);
  278. atomic_set(&channel->intent_req_acked, 0);
  279. atomic_set(&channel->intent_req_completed, 0);
  280. init_waitqueue_head(&channel->intent_req_ack);
  281. init_waitqueue_head(&channel->intent_req_comp);
  282. channel->intent_timeout_count = 0;
  283. INIT_LIST_HEAD(&channel->done_intents);
  284. INIT_LIST_HEAD(&channel->defer_intents);
  285. kthread_init_work(&channel->intent_work, qcom_glink_rx_done_work);
  286. idr_init(&channel->liids);
  287. idr_init(&channel->riids);
  288. kref_init(&channel->refcount);
  289. return channel;
  290. }
/*
 * kref release callback for a glink_channel: wakes any intent-request
 * waiters, cancels pending rx_done work and frees all intents still
 * tracked by the channel before freeing the channel itself.
 */
static void qcom_glink_channel_release(struct kref *ref)
{
	struct glink_channel *channel = container_of(ref, struct glink_channel,
						     refcount);
	struct glink_core_rx_intent *intent;
	struct glink_core_rx_intent *tmp;
	unsigned long flags;
	int iid;

	CH_INFO(channel, "\n");

	/*
	 * Fail any outstanding intent request and wake both waitqueues so
	 * sleepers do not block on a channel that is going away.
	 */
	channel->intent_req_result = false;
	atomic_inc(&channel->intent_req_acked);
	wake_up(&channel->intent_req_ack);
	atomic_inc(&channel->intent_req_completed);
	wake_up(&channel->intent_req_comp);

	/* cancel pending rx_done work */
	kthread_cancel_work_sync(&channel->intent_work);

	spin_lock_irqsave(&channel->intent_lock, flags);
	/* Free all non-reuse intents pending rx_done work */
	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
		/* size == 0 marks a zero-copy intent: data is not ours to free */
		if (!intent->size)
			intent->data = NULL;
		if (!intent->reuse) {
			kfree(intent->data);
			kfree(intent);
		}
	}
	/* Same treatment for intents still held by the client */
	list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
		if (!intent->size)
			intent->data = NULL;
		if (!intent->reuse) {
			kfree(intent->data);
			kfree(intent);
		}
	}
	/*
	 * No list_del above: the lists die with the channel. Remaining
	 * (reusable) local intents are owned by @liids and freed here.
	 */
	idr_for_each_entry(&channel->liids, tmp, iid) {
		kfree(tmp->data);
		kfree(tmp);
	}
	idr_destroy(&channel->liids);

	/* Remote intents carry no local data buffer */
	idr_for_each_entry(&channel->riids, tmp, iid)
		kfree(tmp);
	idr_destroy(&channel->riids);
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	kfree(channel->name);
	kfree(channel);
}
  337. static struct glink_channel *qcom_glink_channel_ref_get(struct qcom_glink *glink,
  338. bool remote_channel, int cid)
  339. {
  340. struct glink_channel *channel = NULL;
  341. unsigned long flags;
  342. if (!glink)
  343. return NULL;
  344. spin_lock_irqsave(&glink->idr_lock, flags);
  345. if (remote_channel)
  346. channel = idr_find(&glink->rcids, cid);
  347. else
  348. channel = idr_find(&glink->lcids, cid);
  349. if (channel)
  350. kref_get(&channel->refcount);
  351. spin_unlock_irqrestore(&glink->idr_lock, flags);
  352. return channel;
  353. }
  354. static void qcom_glink_channel_ref_put(struct glink_channel *channel)
  355. {
  356. if (!channel)
  357. return;
  358. kref_put(&channel->refcount, qcom_glink_channel_release);
  359. }
  360. static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
  361. {
  362. return glink->rx_pipe->avail(glink->rx_pipe);
  363. }
  364. static void qcom_glink_rx_peak(struct qcom_glink *glink,
  365. void *data, unsigned int offset, size_t count)
  366. {
  367. glink->rx_pipe->peak(glink->rx_pipe, data, offset, count);
  368. }
  369. static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
  370. {
  371. glink->rx_pipe->advance(glink->rx_pipe, count);
  372. }
  373. static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
  374. {
  375. return glink->tx_pipe->avail(glink->tx_pipe);
  376. }
  377. static void qcom_glink_tx_write(struct qcom_glink *glink,
  378. const void *hdr, size_t hlen,
  379. const void *data, size_t dlen)
  380. {
  381. glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
  382. }
  383. static void qcom_glink_pipe_reset(struct qcom_glink *glink)
  384. {
  385. if (glink->tx_pipe->reset)
  386. glink->tx_pipe->reset(glink->tx_pipe);
  387. if (glink->rx_pipe->reset)
  388. glink->rx_pipe->reset(glink->rx_pipe);
  389. }
  390. static void qcom_glink_send_read_notify(struct qcom_glink *glink)
  391. {
  392. struct glink_msg msg;
  393. msg.cmd = cpu_to_le16(RPM_CMD_READ_NOTIF);
  394. msg.param1 = 0;
  395. msg.param2 = 0;
  396. GLINK_INFO(glink->ilc, "send READ NOTIFY cmd\n");
  397. qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);
  398. mbox_send_message(glink->mbox_chan, NULL);
  399. mbox_client_txdone(glink->mbox_chan, 0);
  400. }
/*
 * qcom_glink_tx() - write one command (header + optional payload) to the
 * transmit FIFO, optionally blocking until space is available.
 *
 * Returns 0 on success, -EINVAL if the message can never fit the FIFO,
 * -EAGAIN when @wait is false and the FIFO is full, or -ECONNRESET when
 * the edge is in reset.
 */
static int qcom_glink_tx(struct qcom_glink *glink,
			 const void *hdr, size_t hlen,
			 const void *data, size_t dlen, bool wait)
{
	unsigned int tlen = hlen + dlen;
	unsigned long flags;
	int ret = 0;

	/* Reject packets that are too big */
	if (tlen >= glink->tx_pipe->length)
		return -EINVAL;

	if (atomic_read(&glink->in_reset))
		return -ECONNRESET;

	spin_lock_irqsave(&glink->tx_lock, flags);

	while (qcom_glink_tx_avail(glink) < tlen) {
		if (!wait) {
			ret = -EAGAIN;
			goto out;
		}

		/* Ask the remote for a read notification once per stall */
		if (!glink->sent_read_notify) {
			glink->sent_read_notify = true;
			qcom_glink_send_read_notify(glink);
		}

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&glink->tx_lock, flags);

		/*
		 * NOTE(review): the wait_event_timeout() result is ignored,
		 * so a 10s timeout simply retries forever while the edge is
		 * up — presumably intentional; confirm.
		 */
		wait_event_timeout(glink->tx_avail_notify,
				   qcom_glink_tx_avail(glink) >= tlen ||
				   atomic_read(&glink->in_reset), 10 * HZ);

		spin_lock_irqsave(&glink->tx_lock, flags);

		if (atomic_read(&glink->in_reset)) {
			ret = -ECONNRESET;
			goto out;
		}

		/* Space appeared: re-arm the read-notify request for next stall */
		if (qcom_glink_tx_avail(glink) >= tlen)
			glink->sent_read_notify = false;
	}

	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);

	/* Kick the remote via the mailbox doorbell */
	mbox_send_message(glink->mbox_chan, NULL);
	mbox_client_txdone(glink->mbox_chan, 0);

out:
	spin_unlock_irqrestore(&glink->tx_lock, flags);

	return ret;
}
  443. static int qcom_glink_send_version(struct qcom_glink *glink)
  444. {
  445. struct glink_msg msg;
  446. msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
  447. msg.param1 = cpu_to_le16(GLINK_VERSION_1);
  448. msg.param2 = cpu_to_le32(glink->features);
  449. GLINK_INFO(glink->ilc, "vers:%d features:%d\n", msg.param1, msg.param2);
  450. return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
  451. }
  452. static void qcom_glink_send_version_ack(struct qcom_glink *glink)
  453. {
  454. struct glink_msg msg;
  455. msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
  456. msg.param1 = cpu_to_le16(GLINK_VERSION_1);
  457. msg.param2 = cpu_to_le32(glink->features);
  458. GLINK_INFO(glink->ilc, "vers:%d features:%d\n", msg.param1, msg.param2);
  459. qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
  460. }
  461. static void qcom_glink_send_open_ack(struct qcom_glink *glink,
  462. struct glink_channel *channel)
  463. {
  464. struct glink_msg msg;
  465. msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
  466. msg.param1 = cpu_to_le16(channel->rcid);
  467. msg.param2 = cpu_to_le32(0);
  468. CH_INFO(channel, "\n");
  469. qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
  470. }
  471. static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
  472. unsigned int cid, bool granted)
  473. {
  474. struct glink_channel *channel;
  475. channel = qcom_glink_channel_ref_get(glink, true, cid);
  476. if (!channel) {
  477. dev_err(glink->dev, "unable to find channel\n");
  478. return;
  479. }
  480. channel->intent_req_result = granted;
  481. channel->intent_timeout_count = 0;
  482. atomic_inc(&channel->intent_req_acked);
  483. wake_up(&channel->intent_req_ack);
  484. CH_INFO(channel, "\n");
  485. qcom_glink_channel_ref_put(channel);
  486. }
/**
 * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote
 * @glink: Ptr to the glink edge
 * @channel: Ptr to the channel that the open req is sent
 *
 * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
 * Will return with refcount held, regardless of outcome.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qcom_glink_send_open_req(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	struct {
		struct glink_msg msg;
		u8 name[GLINK_NAME_SIZE];
	} __packed req;
	int name_len = strlen(channel->name) + 1;
	/* Pad the header + name out to the FIFO's 8-byte alignment */
	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
	int ret;
	unsigned long flags;

	/*
	 * Per the kernel-doc above, the reference taken here is deliberately
	 * kept even on the failure paths — the caller owns dropping it.
	 */
	kref_get(&channel->refcount);

	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc_cyclic(&glink->lcids, channel,
			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
			       GFP_ATOMIC);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (ret < 0)
		return ret;

	channel->lcid = ret;
	CH_INFO(channel, "\n");

	req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(name_len);
	strcpy(req.name, channel->name);

	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
	if (ret)
		goto remove_idr;

	return 0;

remove_idr:
	/* Undo the lcid allocation; the kref stays held (see above) */
	CH_INFO(channel, "remote_idr\n");
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	return ret;
}
  534. static void qcom_glink_send_close_req(struct qcom_glink *glink,
  535. struct glink_channel *channel)
  536. {
  537. struct glink_msg req;
  538. req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
  539. req.param1 = cpu_to_le16(channel->lcid);
  540. req.param2 = 0;
  541. CH_INFO(channel, "\n");
  542. qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
  543. }
  544. static void qcom_glink_send_close_ack(struct qcom_glink *glink,
  545. unsigned int rcid)
  546. {
  547. struct glink_msg req;
  548. req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
  549. req.param1 = cpu_to_le16(rcid);
  550. req.param2 = 0;
  551. GLINK_INFO(glink->ilc, "rcid:%d\n", rcid);
  552. qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
  553. }
/*
 * Tell the remote that the intent identified by @intent has been consumed.
 * When the intent is not reusable it (and its data) are freed here, so the
 * values needed for logging are captured up front. Returns 0 on success or
 * the qcom_glink_tx() error, in which case the intent is left untouched.
 */
static int qcom_glink_send_rx_done(struct qcom_glink *glink,
				   struct glink_channel *channel,
				   struct glink_core_rx_intent *intent,
				   bool wait)
{
	struct {
		u16 id;
		u16 lcid;
		u32 liid;
		/*
		 * NOTE(review): unlike struct glink_msg these fields carry no
		 * cpu_to_le*() conversion — presumably this variant only runs
		 * little-endian; confirm before enabling on BE.
		 */
	} __packed cmd;
	/* Snapshot everything we log, since @intent may be freed below */
	unsigned int cid = channel->lcid;
	unsigned int iid = intent->id;
	bool reuse = intent->reuse;
	int ret;

	cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
	cmd.lcid = cid;
	cmd.liid = iid;

	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, wait);
	if (ret)
		return ret;

	/* clear data if zero copy intent */
	if (!intent->size)
		intent->data = NULL;

	/* Reuse @ret to carry the logged data size from here on */
	ret = intent->offset;

	if (!reuse) {
		kfree(intent->data);
		kfree(intent);
	}

	CH_INFO(channel, "reuse:%d liid:%d data_size:%d", reuse, iid, ret);
	return 0;
}
  585. static void qcom_glink_rx_done_work(struct kthread_work *work)
  586. {
  587. struct glink_channel *channel = container_of(work, struct glink_channel,
  588. intent_work);
  589. struct qcom_glink *glink = channel->glink;
  590. struct glink_core_rx_intent *intent, *tmp;
  591. unsigned long flags;
  592. spin_lock_irqsave(&channel->intent_lock, flags);
  593. list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
  594. list_del(&intent->node);
  595. spin_unlock_irqrestore(&channel->intent_lock, flags);
  596. qcom_glink_send_rx_done(glink, channel, intent, true);
  597. spin_lock_irqsave(&channel->intent_lock, flags);
  598. }
  599. spin_unlock_irqrestore(&channel->intent_lock, flags);
  600. }
/*
 * Release @intent back to the remote: either send rx_done inline (when
 * nothing is already queued) or defer it to the channel's rx_done worker.
 * On intentless edges the intent is simply freed — no rx_done is sent.
 */
static void __qcom_glink_rx_done(struct qcom_glink *glink,
				 struct glink_channel *channel,
				 struct glink_core_rx_intent *intent)
{
	unsigned long flags;
	/*
	 * Pre-seeded to -EAGAIN so that when the inline send is skipped
	 * (done_intents non-empty) the intent still falls through to the
	 * deferred path below.
	 */
	int ret = -EAGAIN;

	/* We don't send RX_DONE to intentless systems */
	if (glink->intentless) {
		kfree(intent->data);
		kfree(intent);
		return;
	}

	/* Take it off the tree of receive intents */
	if (!intent->reuse) {
		spin_lock_irqsave(&channel->intent_lock, flags);
		idr_remove(&channel->liids, intent->id);
		spin_unlock_irqrestore(&channel->intent_lock, flags);
	}

	spin_lock_irqsave(&channel->intent_lock, flags);
	/* Remove intent from intent defer list */
	list_del(&intent->node);
	/*
	 * Schedule the sending of a rx_done indication. Only try inline
	 * when the queue is empty, to preserve rx_done ordering; the send
	 * uses wait=false since intent_lock is held here.
	 */
	if (list_empty(&channel->done_intents))
		ret = qcom_glink_send_rx_done(glink, channel, intent, false);
	if (ret) {
		/* Inline send skipped or failed: hand off to the worker */
		list_add_tail(&intent->node, &channel->done_intents);
		kthread_queue_work(&glink->kworker, &channel->intent_work);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);
}
  631. bool qcom_glink_rx_done_supported(struct rpmsg_endpoint *ept)
  632. {
  633. struct glink_channel *channel;
  634. struct qcom_glink *glink;
  635. if (WARN_ON(!ept))
  636. return -EINVAL;
  637. channel = to_glink_channel(ept);
  638. glink = channel->glink;
  639. return glink->features & GLINK_FEATURE_ZERO_COPY;
  640. }
  641. EXPORT_SYMBOL(qcom_glink_rx_done_supported);
  642. /**
  643. * rpmsg_rx_done() - release resources related to @data from a @rx_cb
  644. * @ept: the rpmsg endpoint
  645. * @data: payload from a message
  646. *
  647. * Returns 0 on success and an appropriate error value on failure.
  648. */
  649. int qcom_glink_rx_done(struct rpmsg_endpoint *ept, void *data)
  650. {
  651. struct glink_core_rx_intent *intent, *tmp;
  652. struct glink_channel *channel;
  653. struct qcom_glink *glink;
  654. unsigned long flags;
  655. if (WARN_ON(!ept))
  656. return -EINVAL;
  657. if (!qcom_glink_rx_done_supported(ept))
  658. return -EINVAL;
  659. channel = to_glink_channel(ept);
  660. glink = channel->glink;
  661. spin_lock_irqsave(&channel->intent_lock, flags);
  662. list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
  663. if (intent->data == data) {
  664. list_del(&intent->node);
  665. if (!intent->reuse)
  666. idr_remove(&channel->liids, intent->id);
  667. spin_unlock_irqrestore(&channel->intent_lock, flags);
  668. if (intent->reuse == false)
  669. idr_remove(&channel->liids, intent->id);
  670. qcom_glink_send_rx_done(glink, channel, intent, true);
  671. return 0;
  672. }
  673. }
  674. spin_unlock_irqrestore(&channel->intent_lock, flags);
  675. return -EINVAL;
  676. }
  677. EXPORT_SYMBOL(qcom_glink_rx_done);
  678. /**
  679. * qcom_glink_receive_version() - receive version/features from remote system
  680. *
  681. * @glink: pointer to transport interface
  682. * @version: remote version
  683. * @features: remote features
  684. *
  685. * This function is called in response to a remote-initiated version/feature
  686. * negotiation sequence.
  687. */
  688. static void qcom_glink_receive_version(struct qcom_glink *glink,
  689. u32 version,
  690. u32 features)
  691. {
  692. GLINK_INFO(glink->ilc, "vers:%d features:%d\n", version, features);
  693. switch (version) {
  694. case 0:
  695. break;
  696. case GLINK_VERSION_1:
  697. glink->features &= features;
  698. fallthrough;
  699. default:
  700. qcom_glink_send_version_ack(glink);
  701. break;
  702. }
  703. }
  704. /**
  705. * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
  706. *
  707. * @glink: pointer to transport interface
  708. * @version: remote version response
  709. * @features: remote features response
  710. *
  711. * This function is called in response to a local-initiated version/feature
  712. * negotiation sequence and is the counter-offer from the remote side based
  713. * upon the initial version and feature set requested.
  714. */
  715. static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
  716. u32 version,
  717. u32 features)
  718. {
  719. GLINK_INFO(glink->ilc, "vers:%d features:%d\n", version, features);
  720. switch (version) {
  721. case 0:
  722. /* Version negotiation failed */
  723. break;
  724. case GLINK_VERSION_1:
  725. if (features == glink->features)
  726. break;
  727. glink->features &= features;
  728. fallthrough;
  729. default:
  730. qcom_glink_send_version(glink);
  731. break;
  732. }
  733. }
  734. /**
  735. * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
  736. * wire format and transmit
  737. * @glink: The transport to transmit on.
  738. * @channel: The glink channel
  739. * @granted: The request response to encode.
  740. *
  741. * Return: 0 on success or standard Linux error code.
  742. */
  743. static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
  744. struct glink_channel *channel,
  745. bool granted)
  746. {
  747. struct glink_msg msg;
  748. msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK);
  749. msg.param1 = cpu_to_le16(channel->lcid);
  750. msg.param2 = cpu_to_le32(granted);
  751. CH_INFO(channel, "\n");
  752. qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
  753. return 0;
  754. }
/**
 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
 * transmit
 * @glink: The transport to transmit on.
 * @channel: The local channel
 * @intent: The intent to pass on to remote.
 *
 * Marks @intent as advertised under intent_lock so each intent is put on
 * the wire at most once, then transmits a single-entry RPM_CMD_INTENT.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int qcom_glink_advertise_intent(struct qcom_glink *glink,
				       struct glink_channel *channel,
				       struct glink_core_rx_intent *intent)
{
	struct command {
		__le16 id;
		__le16 lcid;
		__le32 count;
		__le32 size;
		__le32 liid;
	} __packed;
	struct command cmd;
	unsigned long flags;

	/* Claim the advertisement flag atomically; a second caller for the
	 * same intent bails out without re-sending. */
	spin_lock_irqsave(&channel->intent_lock, flags);
	if (intent->advertised) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		return 0;
	}
	intent->advertised = true;
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	cmd.id = cpu_to_le16(RPM_CMD_INTENT);
	cmd.lcid = cpu_to_le16(channel->lcid);
	cmd.count = cpu_to_le32(1);
	cmd.size = cpu_to_le32(intent->size);
	cmd.liid = cpu_to_le32(intent->id);

	CH_INFO(channel, "count:%d size:%zd liid:%d\n", 1,
		intent->size, intent->id);

	qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);

	return 0;
}
  794. static struct glink_core_rx_intent *
  795. qcom_glink_alloc_intent(struct qcom_glink *glink,
  796. struct glink_channel *channel,
  797. size_t size,
  798. bool reuseable)
  799. {
  800. struct glink_core_rx_intent *intent;
  801. int ret;
  802. unsigned long flags;
  803. intent = kzalloc(sizeof(*intent), GFP_KERNEL);
  804. if (!intent)
  805. return NULL;
  806. if (size) {
  807. intent->data = kzalloc(size, GFP_KERNEL);
  808. if (!intent->data)
  809. goto free_intent;
  810. }
  811. spin_lock_irqsave(&channel->intent_lock, flags);
  812. ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
  813. if (ret < 0) {
  814. spin_unlock_irqrestore(&channel->intent_lock, flags);
  815. goto free_data;
  816. }
  817. spin_unlock_irqrestore(&channel->intent_lock, flags);
  818. intent->id = ret;
  819. intent->size = size;
  820. intent->reuse = reuseable;
  821. return intent;
  822. free_data:
  823. kfree(intent->data);
  824. free_intent:
  825. kfree(intent);
  826. return NULL;
  827. }
/* Handle an incoming RX_DONE / RX_DONE_W_REUSE command: the remote side
 * has consumed the buffer behind remote intent @iid on channel @cid.
 * Reusable intents are simply marked free; others are removed from the
 * riids tree and released. */
static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
				      u32 cid, uint32_t iid,
				      bool reuse)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	unsigned long flags;

	channel = qcom_glink_channel_ref_get(glink, true, cid);
	if (!channel) {
		dev_err(glink->dev, "invalid channel id received\n");
		return;
	}

	spin_lock_irqsave(&channel->intent_lock, flags);
	intent = idr_find(&channel->riids, iid);
	if (!intent) {
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		dev_err(glink->dev, "invalid intent id received\n");
		qcom_glink_channel_ref_put(channel);
		return;
	}

	intent->in_use = false;
	CH_INFO(channel, "reuse:%d iid:%d\n", reuse, intent->id);

	if (!reuse) {
		idr_remove(&channel->riids, intent->id);
		kfree(intent);
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);
	qcom_glink_channel_ref_put(channel);
}
  857. /**
  858. * qcom_glink_handle_intent_req() - Receive a request for rx_intent
  859. * from remote side
  860. * @glink: Pointer to the transport interface
  861. * @cid: Remote channel ID
  862. * @size: size of the intent
  863. *
  864. * The function searches for the local channel to which the request for
  865. * rx_intent has arrived and allocates and notifies the remote back
  866. */
  867. static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
  868. u32 cid, size_t size)
  869. {
  870. struct glink_core_rx_intent *intent = NULL;
  871. struct glink_core_rx_intent *tmp;
  872. struct glink_channel *channel;
  873. struct rpmsg_endpoint *ept;
  874. unsigned long flags;
  875. int iid;
  876. channel = qcom_glink_channel_ref_get(glink, true, cid);
  877. if (!channel) {
  878. pr_err("%s channel not found for cid %u\n", __func__, cid);
  879. return;
  880. }
  881. spin_lock_irqsave(&channel->intent_lock, flags);
  882. idr_for_each_entry(&channel->liids, tmp, iid) {
  883. if (tmp->size >= size && tmp->reuse) {
  884. intent = tmp;
  885. break;
  886. }
  887. }
  888. spin_unlock_irqrestore(&channel->intent_lock, flags);
  889. if (intent) {
  890. qcom_glink_send_intent_req_ack(glink, channel, !!intent);
  891. qcom_glink_channel_ref_put(channel);
  892. return;
  893. }
  894. ept = &channel->ept;
  895. intent = qcom_glink_alloc_intent(glink, channel, size, false);
  896. if (intent && channel->channel_ready)
  897. qcom_glink_advertise_intent(glink, channel, intent);
  898. qcom_glink_send_intent_req_ack(glink, channel, !!intent);
  899. qcom_glink_channel_ref_put(channel);
  900. }
/* Copy the current rx command (plus @extra trailing bytes) out of the rx
 * fifo into a glink_defer_cmd and queue it on glink->rx_queue for process
 * context handling by glink->rx_work.
 *
 * Return: 0 on success, -ENXIO when the fifo does not yet hold the whole
 * command, -ENOMEM on allocation failure.
 */
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
{
	struct glink_defer_cmd *dcmd;
	unsigned long flags;

	/* Commands are 8-byte aligned in the fifo. */
	extra = ALIGN(extra, 8);

	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo");
		return -ENXIO;
	}

	dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC);
	if (!dcmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcmd->node);

	qcom_glink_rx_peak(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);

	spin_lock_irqsave(&glink->rx_lock, flags);
	list_add_tail(&dcmd->node, &glink->rx_queue);
	spin_unlock_irqrestore(&glink->rx_lock, flags);

	queue_work(system_highpri_wq, &glink->rx_work);
	/* Only consume fifo space after the copy is safely queued. */
	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);

	return 0;
}
  922. bool qcom_glink_is_wakeup(bool reset)
  923. {
  924. if (!glink_resume_pkt)
  925. return false;
  926. if (reset)
  927. glink_resume_pkt = false;
  928. return true;
  929. }
  930. EXPORT_SYMBOL(qcom_glink_is_wakeup);
/* Receive one TX_DATA / TX_DATA_CONT fragment from the rx fifo, copy it
 * into the matching receive intent, and deliver the message to the
 * endpoint callback once the final fragment (left_size == 0) arrives.
 *
 * Return: 0 on success (including silently dropped messages), -EAGAIN
 * when the full fragment is not yet in the fifo, -ENOMEM/-ENOENT on
 * intent failures.
 */
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel = NULL;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;
	unsigned int liid;
	int ret = 0;
	unsigned long flags;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	/* Leave the command in the fifo until its payload is complete. */
	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	rcid = le16_to_cpu(hdr.msg.param1);
	channel = qcom_glink_channel_ref_get(glink, true, rcid);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");
		/* Drop the message */
		goto advance_rx;
	}
	CH_INFO(channel, "chunk_size:%d left_size:%d\n", chunk_size, left_size);

	if (glink->intentless) {
		/* Might have an ongoing, fragmented, message to append */
		if (!channel->buf) {
			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
			if (!intent) {
				qcom_glink_channel_ref_put(channel);
				return -ENOMEM;
			}

			/* Size the buffer for the whole message up front. */
			intent->data = kmalloc(chunk_size + left_size,
					       GFP_ATOMIC);
			if (!intent->data) {
				kfree(intent);
				qcom_glink_channel_ref_put(channel);
				return -ENOMEM;
			}

			/* Sentinel id: intentless buffers live outside the
			 * liids tree. */
			intent->id = 0xbabababa;
			intent->size = chunk_size + left_size;
			intent->offset = 0;
			channel->buf = intent;
		} else {
			intent = channel->buf;
		}
	} else {
		liid = le32_to_cpu(hdr.msg.param2);

		spin_lock_irqsave(&channel->intent_lock, flags);
		intent = idr_find(&channel->liids, liid);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (!intent) {
			dev_err(glink->dev,
				"no intent found for channel %s intent %d",
				channel->name, liid);
			ret = -ENOENT;
			goto advance_rx;
		}
	}

	if (intent->size - intent->offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in intent\n");

		/* The packet header lied, drop payload */
		goto advance_rx;
	}

	qcom_glink_rx_peak(glink, intent->data + intent->offset,
			   sizeof(hdr), chunk_size);
	intent->offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		/* Park the intent so a later qcom_glink_rx_done() can find
		 * it by buffer address. */
		if (!glink->intentless) {
			spin_lock_irqsave(&channel->intent_lock, flags);
			list_add_tail(&intent->node, &channel->defer_intents);
			spin_unlock_irqrestore(&channel->intent_lock, flags);
		}
		spin_lock_irqsave(&channel->recv_lock, flags);
		if (channel->ept.cb) {
			ret = channel->ept.cb(channel->ept.rpdev,
					      intent->data,
					      intent->offset,
					      channel->ept.priv,
					      RPMSG_ADDR_ANY);
			if (ret < 0) {
				/* -ENODEV is expected during teardown; stay
				 * quiet for it. */
				if (ret != -ENODEV) {
					CH_ERR(channel,
					       "callback error ret = %d\n", ret);
				}
			}
		} else {
			CH_ERR(channel, "callback not present\n");
		}
		spin_unlock_irqrestore(&channel->recv_lock, flags);

		if (qcom_glink_is_wakeup(true)) {
			pr_info("%s[%d:%d] %s: wakeup packet size:%d\n",
				channel->name, channel->lcid, channel->rcid,
				__func__, intent->offset);
		}

		intent->offset = 0;
		channel->buf = NULL;

		/* RPMSG_DEFER with rx_done support means the client will
		 * release the buffer later via qcom_glink_rx_done(). */
		if (!(qcom_glink_rx_done_supported(&channel->ept) && ret == RPMSG_DEFER))
			__qcom_glink_rx_done(glink, channel, intent);
		ret = 0;
	}

advance_rx:
	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
	qcom_glink_channel_ref_put(channel);

	return ret;
}
/* Receive a TX_DATA_ZERO_COPY command: the payload stays in a shared
 * memory pool and only its device address is carried in the header.  The
 * address is translated to a CPU pointer, attached to a buffer-less local
 * intent, and delivered to the endpoint callback.
 *
 * NOTE(review): unlike qcom_glink_rx_data() this always returns 0 at the
 * end (errors recorded in ret are not propagated) — confirm intentional.
 */
static int qcom_glink_rx_data_zero_copy(struct qcom_glink *glink, size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel = NULL;
	struct {
		struct glink_msg msg;
		__le32 pool_id;
		__le32 size;
		__le64 addr;
	} __packed hdr;
	unsigned long flags;
	unsigned int rcid;
	unsigned int liid;
	unsigned int len;
	int ret = 0;
	void *data;
	u64 da;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}
	qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr));

	/* Zero-copy relies on negotiated intents by definition. */
	if (glink->intentless) {
		dev_dbg(glink->dev, "Zero copy cannot be intentless\n");
		goto advance_rx;
	}

	rcid = le16_to_cpu(hdr.msg.param1);
	channel = qcom_glink_channel_ref_get(glink, true, rcid);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");
		goto advance_rx;
	}

	liid = le32_to_cpu(hdr.msg.param2);
	spin_lock_irqsave(&channel->intent_lock, flags);
	intent = idr_find(&channel->liids, liid);
	spin_unlock_irqrestore(&channel->intent_lock, flags);
	if (!intent) {
		CH_ERR(channel, "no intent found liid:%d\n", liid);
		ret = -ENOENT;
		goto advance_rx;
	}

	/* Only a zero-sized (buffer-less) intent may carry a pool pointer. */
	if (intent->size) {
		CH_ERR(channel, "zero copy req wrong intent liid:%d size:%d\n",
		       liid, intent->size);
		goto advance_rx;
	}

	/* Only process the first vector in the array */
	da = le64_to_cpu(hdr.addr);
	len = le32_to_cpu(hdr.size);

	data = qcom_glink_prepare_da_for_cpu(da, len);
	if (!data) {
		CH_ERR(channel, "failed to get va da:0x%llx len:%d\n", da, len);
		goto advance_rx;
	}
	/* NOTE(review): "va" is printed with %llx but data is a pointer —
	 * %p/%px would be the type-correct specifier. */
	CH_INFO(channel, "da:0x%llx va:0x%llx len:%d\n", da, data, len);

	intent->data = data;
	intent->offset = len;

	/* Park the intent so qcom_glink_rx_done() can find it by address. */
	spin_lock_irqsave(&channel->intent_lock, flags);
	list_add_tail(&intent->node, &channel->defer_intents);
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	spin_lock_irqsave(&channel->recv_lock, flags);
	if (channel->ept.cb) {
		ret = channel->ept.cb(channel->ept.rpdev, intent->data,
				      intent->offset,
				      channel->ept.priv,
				      RPMSG_ADDR_ANY);
		if (ret < 0 && ret != -ENODEV) {
			CH_ERR(channel,
			       "callback error ret = %d\n", ret);
			ret = 0;
		}
	} else {
		CH_ERR(channel, "callback not present\n");
	}
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	if (qcom_glink_is_wakeup(true)) {
		pr_info("%s[%d:%d] %s: wakeup packet size:%d\n", channel->name,
			channel->lcid, channel->rcid,
			__func__, intent->offset);
	}

	intent->offset = 0;

	/* RPMSG_DEFER + rx_done support: client releases the buffer later. */
	if (!(qcom_glink_rx_done_supported(&channel->ept) && ret == RPMSG_DEFER))
		__qcom_glink_rx_done(glink, channel, intent);

advance_rx:
	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr), 8));
	qcom_glink_channel_ref_put(channel);
	return 0;
}
/* Handle an incoming RPM_CMD_INTENT carrying @count remote intent
 * advertisements for channel @cid.  Each advertised (iid, size) pair is
 * recorded in the channel's riids tree, then any waiter blocked in
 * intent-request is woken. */
static void qcom_glink_handle_intent(struct qcom_glink *glink,
				     unsigned int cid,
				     unsigned int count,
				     size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct intent_pair {
		__le32 size;
		__le32 iid;
	};

	struct {
		struct glink_msg msg;
		struct intent_pair intents[];
	} __packed * msg;

	const size_t msglen = struct_size(msg, intents, count);
	int ret;
	int i;
	unsigned long flags;

	if (avail < msglen) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return;
	}

	channel = qcom_glink_channel_ref_get(glink, true, cid);
	if (!channel) {
		dev_err(glink->dev, "intents for non-existing channel\n");
		/* Consume the command even though it cannot be handled. */
		qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
		return;
	}

	msg = kmalloc(msglen, GFP_ATOMIC);
	if (!msg) {
		qcom_glink_channel_ref_put(channel);
		return;
	}

	qcom_glink_rx_peak(glink, msg, 0, msglen);

	for (i = 0; i < count; ++i) {
		intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
		if (!intent)
			break;

		intent->id = le32_to_cpu(msg->intents[i].iid);
		intent->size = le32_to_cpu(msg->intents[i].size);
		CH_INFO(channel, "riid:%d size:%zd\n",
			intent->id, intent->size);

		/* Register under the remote-chosen id (range [id, id+1)). */
		spin_lock_irqsave(&channel->intent_lock, flags);
		ret = idr_alloc(&channel->riids, intent,
				intent->id, intent->id + 1, GFP_ATOMIC);
		spin_unlock_irqrestore(&channel->intent_lock, flags);
		if (ret < 0)
			dev_err(glink->dev, "failed to store remote intent\n");
	}

	/* Wake qcom_glink_request_intent() waiters. */
	atomic_inc(&channel->intent_req_completed);
	wake_up(&channel->intent_req_comp);

	kfree(msg);
	qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
	qcom_glink_channel_ref_put(channel);
}
  1192. static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
  1193. {
  1194. struct glink_channel *channel;
  1195. channel = qcom_glink_channel_ref_get(glink, false, lcid);
  1196. if (!channel) {
  1197. dev_err(glink->dev, "Invalid open ack packet\n");
  1198. return -EINVAL;
  1199. }
  1200. CH_INFO(channel, "\n");
  1201. complete_all(&channel->open_ack);
  1202. qcom_glink_channel_ref_put(channel);
  1203. return 0;
  1204. }
  1205. /**
  1206. * qcom_glink_send_signals() - convert a signal cmd to wire format and transmit
  1207. * @glink: The transport to transmit on.
  1208. * @channel: The glink channel
  1209. * @signals: The signals to encode.
  1210. *
  1211. * Return: 0 on success or standard Linux error code.
  1212. */
  1213. static int qcom_glink_send_signals(struct qcom_glink *glink,
  1214. struct glink_channel *channel,
  1215. u32 signals)
  1216. {
  1217. struct glink_msg msg;
  1218. /* convert signals from TIOCM to NATIVE */
  1219. signals &= 0x0fff;
  1220. if (signals & TIOCM_DTR)
  1221. signals |= NATIVE_DTR_SIG;
  1222. if (signals & TIOCM_RTS)
  1223. signals |= NATIVE_CTS_SIG;
  1224. if (signals & TIOCM_CD)
  1225. signals |= NATIVE_CD_SIG;
  1226. if (signals & TIOCM_RI)
  1227. signals |= NATIVE_RI_SIG;
  1228. msg.cmd = cpu_to_le16(RPM_CMD_SIGNALS);
  1229. msg.param1 = cpu_to_le16(channel->lcid);
  1230. msg.param2 = cpu_to_le32(signals);
  1231. GLINK_INFO(glink->ilc, "signals:%d\n", signals);
  1232. return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
  1233. }
  1234. static int qcom_glink_handle_signals(struct qcom_glink *glink,
  1235. unsigned int rcid, unsigned int signals)
  1236. {
  1237. struct glink_channel *channel;
  1238. u32 old;
  1239. channel = qcom_glink_channel_ref_get(glink, true, rcid);
  1240. if (!channel) {
  1241. dev_err(glink->dev, "signal for non-existing channel\n");
  1242. return -EINVAL;
  1243. }
  1244. old = channel->remote_signals;
  1245. /* convert signals from NATIVE to TIOCM */
  1246. if (signals & NATIVE_DTR_SIG)
  1247. signals |= TIOCM_DSR;
  1248. if (signals & NATIVE_CTS_SIG)
  1249. signals |= TIOCM_CTS;
  1250. if (signals & NATIVE_CD_SIG)
  1251. signals |= TIOCM_CD;
  1252. if (signals & NATIVE_RI_SIG)
  1253. signals |= TIOCM_RI;
  1254. signals &= 0x0fff;
  1255. channel->remote_signals = signals;
  1256. CH_INFO(channel, "old:%d new:%d\n", old, channel->remote_signals);
  1257. if (channel->signals_cb)
  1258. channel->signals_cb(channel->ept.rpdev, channel->ept.priv,
  1259. old, channel->remote_signals);
  1260. qcom_glink_channel_ref_put(channel);
  1261. return 0;
  1262. }
/* Drain and dispatch commands from the rx fifo.  Processes at most
 * @iterations commands (0 means "until the fifo is empty or a handler
 * fails").  The irq_running flag makes concurrent invocations (hard irq
 * vs threaded irq) mutually exclusive.
 *
 * Return: the number of bytes still pending in the rx fifo.
 */
static int qcom_glink_native_rx(struct qcom_glink *glink, int iterations)
{
	struct glink_msg msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret = 0;
	int i;

	/* First rx after a system resume: latch the wakeup marker and make
	 * sure the suspend path is aborted. */
	if (should_wake) {
		pr_info("%s: wakeup %s\n", __func__, glink->irqname);
		glink_resume_pkt = true;
		should_wake = false;
		pm_system_wakeup();
	}

	/* Single-drainer guard: bail if another context is already here. */
	spin_lock_irqsave(&glink->irq_lock, flags);
	if (glink->irq_running) {
		spin_unlock_irqrestore(&glink->irq_lock, flags);
		return 0;
	}
	glink->irq_running = true;
	spin_unlock_irqrestore(&glink->irq_lock, flags);

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	for (i = 0; i < iterations || !iterations; i++) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		qcom_glink_rx_peak(glink, &msg, 0, sizeof(msg));
		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		/* These are punted to process context; rx_defer() advances
		 * the fifo itself. */
		case RPM_CMD_VERSION:
		case RPM_CMD_VERSION_ACK:
		case RPM_CMD_CLOSE:
		case RPM_CMD_CLOSE_ACK:
		case RPM_CMD_RX_INTENT_REQ:
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case RPM_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_OPEN:
			/* param2 = length of the trailing channel name */
			ret = qcom_glink_rx_defer(glink, param2);
			break;
		case RPM_CMD_TX_DATA:
		case RPM_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case RPM_CMD_TX_DATA_ZERO_COPY:
			ret = qcom_glink_rx_data_zero_copy(glink, avail);
			break;
		case RPM_CMD_READ_NOTIF:
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));

			/* Remote asked to be kicked when we consume data. */
			mbox_send_message(glink->mbox_chan, NULL);
			mbox_client_txdone(glink->mbox_chan, 0);
			break;
		case RPM_CMD_INTENT:
			qcom_glink_handle_intent(glink, param1, param2, avail);
			break;
		case RPM_CMD_RX_DONE:
			qcom_glink_handle_rx_done(glink, param1, param2, false);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_RX_DONE_W_REUSE:
			qcom_glink_handle_rx_done(glink, param1, param2, true);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_RX_INTENT_REQ_ACK:
			qcom_glink_handle_intent_req_ack(glink, param1, param2);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_SIGNALS:
			qcom_glink_handle_signals(glink, param1, param2);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	spin_lock_irqsave(&glink->irq_lock, flags);
	glink->irq_running = false;
	spin_unlock_irqrestore(&glink->irq_lock, flags);

	return qcom_glink_rx_avail(glink);
}
  1355. static irqreturn_t qcom_glink_native_intr(int irq, void *data)
  1356. {
  1357. struct qcom_glink *glink = data;
  1358. int ret;
  1359. ret = qcom_glink_native_rx(glink, 10);
  1360. return (ret) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
  1361. }
  1362. static irqreturn_t qcom_glink_native_thread_intr(int irq, void *data)
  1363. {
  1364. struct qcom_glink *glink = data;
  1365. qcom_glink_native_rx(glink, 0);
  1366. return IRQ_HANDLED;
  1367. }
/* Locally initiated rpmsg_create_ept */
/* Allocate a new channel named @name, send an OPEN request and wait (5s
 * each) for the remote's OPEN_ACK and its own OPEN request.  On timeout
 * the channel is deregistered from lcids and both outstanding krefs are
 * dropped.  Returns the channel or ERR_PTR(-ETIMEDOUT). */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
						     const char *name)
{
	struct glink_channel *channel;
	int ret;
	unsigned long flags;

	channel = qcom_glink_alloc_channel(glink, name);
	if (IS_ERR(channel))
		return ERR_CAST(channel);

	CH_INFO(channel, "\n");
	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto release_channel;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret)
		goto err_timeout;

	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
	if (!ret)
		goto err_timeout;

	return channel;

err_timeout:
	CH_INFO(channel, "err_timeout\n");

	/* qcom_glink_send_open_req() did register the channel in lcids*/
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->lcids, channel->lcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);

release_channel:
	CH_INFO(channel, "release_channel\n");
	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ERR_PTR(-ETIMEDOUT);
}
/* Remote initiated rpmsg_create_ept */
/* Answer a remote OPEN by sending our own OPEN request and waiting up to
 * 5s for the OPEN_ACK.  On failure a CLOSE request is sent to unwind.
 * Returns 0 on success or a negative error code. */
static int qcom_glink_create_remote(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	int ret;

	CH_INFO(channel, "\n");
	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto close_link;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto close_link;
	}

	return 0;

close_link:
	CH_INFO(channel, "close_link %d\n", ret);

	/*
	 * Send a close request to "undo" our open-ack. The close-ack will
	 * release qcom_glink_send_open_req() reference and the last reference
	 * will be relesed after receiving remote_close or transport unregister
	 * by calling qcom_glink_native_remove().
	 */
	qcom_glink_send_close_req(glink, channel);

	return ret;
}
/* rpmsg endpoint-ops create_ept: open the channel named in @chinfo.  If a
 * remote-initiated channel with that name already exists in rcids, finish
 * the remote handshake; otherwise open a fresh local channel.  Returns
 * the initialized endpoint or NULL on failure. */
static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info
						    chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;
	unsigned long flags;

	/* channel is NULL after the loop when no name matches. */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	CH_INFO(channel, "Initializing ept\n");
	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;
	CH_INFO(channel, "Initialized ept\n");

	return ept;
}
/* rpmsg endpoint-ops announce_create: once the channel handshake is done,
 * advertise any pending one-shot intents and the base set of reusable
 * intents.  Intent groups come from the optional "qcom,intents" DT
 * property as (size, count) pairs, defaulting to 0x0 and 5 x SZ_1K. */
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
	struct glink_channel *channel = to_glink_channel(rpdev->ept);
	struct device_node *np = rpdev->dev.of_node;
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent;
	struct glink_core_rx_intent *tmp;
	const struct property *prop = NULL;
	/* Defaults: one group of 2 zero-size intents, one of 5 x 1KiB. */
	__be32 defaults[] = { cpu_to_be32(0), cpu_to_be32(2),
			      cpu_to_be32(SZ_1K), cpu_to_be32(5) };
	int num_intents;
	int num_groups = 2;
	__be32 *val = defaults;
	unsigned long flags;
	int iid;
	int size;

	CH_INFO(channel, "Entered\n");
	/* Nothing to advertise on intentless transports or before the
	 * remote has acked our open. */
	if (glink->intentless || !completion_done(&channel->open_ack))
		return 0;

	channel->channel_ready = true;

	/*Serve any pending intent request*/
	spin_lock_irqsave(&channel->intent_lock, flags);
	idr_for_each_entry(&channel->liids, tmp, iid) {
		if (!tmp->reuse && !tmp->advertised) {
			intent = tmp;
			/* advertise_intent can block; drop the lock for it.
			 * NOTE(review): the idr iteration continues after
			 * relocking — verify concurrent liids mutation
			 * cannot invalidate the cursor. */
			spin_unlock_irqrestore(&channel->intent_lock, flags);
			qcom_glink_advertise_intent(glink, channel, intent);
			spin_lock_irqsave(&channel->intent_lock, flags);
		}
	}
	spin_unlock_irqrestore(&channel->intent_lock, flags);

	prop = of_find_property(np, "qcom,intents", NULL);
	if (prop) {
		val = prop->value;
		num_groups = prop->length / sizeof(u32) / 2;
	}

	/* Channel is now open, advertise base set of intents */
	while (num_groups--) {
		size = be32_to_cpup(val++);
		num_intents = be32_to_cpup(val++);
		while (num_intents--) {
			intent = qcom_glink_alloc_intent(glink, channel, size,
							 true);
			if (!intent)
				break;

			qcom_glink_advertise_intent(glink, channel, intent);
		}
	}

	CH_INFO(channel, "Exit\n");
	return 0;
}
  1518. static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
  1519. {
  1520. struct glink_channel *channel = to_glink_channel(ept);
  1521. struct qcom_glink *glink = channel->glink;
  1522. unsigned long flags;
  1523. spin_lock_irqsave(&channel->recv_lock, flags);
  1524. if (!channel->ept.cb) {
  1525. spin_unlock_irqrestore(&channel->recv_lock, flags);
  1526. return;
  1527. }
  1528. channel->ept.cb = NULL;
  1529. spin_unlock_irqrestore(&channel->recv_lock, flags);
  1530. qcom_glink_send_close_req(glink, channel);
  1531. }
/*
 * Ask the remote side to queue an rx intent of at least @size bytes.
 *
 * Sends RPM_CMD_RX_INTENT_REQ and blocks (up to 10s) for the remote's
 * ack.  Serialized by intent_req_lock so at most one request per channel
 * is in flight.
 *
 * Returns 0 when the remote granted the request, -ECANCELED when it
 * refused, -ETIMEDOUT when no ack arrived, -ECONNRESET on subsystem
 * reset, or the qcom_glink_tx() error.
 */
static int qcom_glink_request_intent(struct qcom_glink *glink,
				     struct glink_channel *channel,
				     size_t size)
{
	/* Wire format of the intent-request command */
	struct {
		u16 id;
		u16 cid;
		u32 size;
	} __packed cmd;
	int ret;

	mutex_lock(&channel->intent_req_lock);

	/* Re-arm both the ack and the completion markers before sending */
	atomic_set(&channel->intent_req_acked, 0);
	atomic_set(&channel->intent_req_completed, 0);

	cmd.id = RPM_CMD_RX_INTENT_REQ;
	cmd.cid = channel->lcid;
	cmd.size = size;

	CH_INFO(channel, "size:%zd\n", size);

	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
	if (ret)
		goto unlock;

	/* Woken either by the ack handler or by SSR (glink->in_reset) */
	ret = wait_event_timeout(channel->intent_req_ack,
				 atomic_read(&channel->intent_req_acked) ||
				 atomic_read(&glink->in_reset), 10 * HZ);
	if (!ret) {
		dev_err(glink->dev, "%s: intent request ack timed out (%d)\n",
			channel->name, channel->intent_timeout_count);
		ret = -ETIMEDOUT;
		/* Escalate to a logged BUG after repeated unanswered
		 * requests — the remote is considered unresponsive. */
		channel->intent_timeout_count++;
		if (channel->intent_timeout_count >= MAX_INTENT_TIMEOUTS)
			GLINK_BUG(glink->ilc,
				  "remoteproc:%s channel:%s unresponsive\n",
				  glink->name, channel->name);
	} else if (atomic_read(&glink->in_reset)) {
		CH_INFO(channel, "ssr detected\n");
		ret = -ECONNRESET;
	} else {
		/* Ack arrived; result says whether the request was granted */
		ret = channel->intent_req_result ? 0 : -ECANCELED;
	}

unlock:
	mutex_unlock(&channel->intent_req_lock);
	return ret;
}
/*
 * Common transmit path for all rpmsg send variants.
 *
 * On intent-based transports, first find (or, when @wait, request) a
 * remote rx intent big enough for @len, then transmit the data.  Large
 * blocking sends are split into SZ_8K chunks (TX_DATA followed by
 * TX_DATA_CONT commands).
 *
 * Returns 0 on success or a negative errno (-EBUSY when !@wait and no
 * intent is available, -ECONNRESET on SSR, -ETIMEDOUT/-ECANCELED from
 * the intent request, or the qcom_glink_tx() error).
 */
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct glink_core_rx_intent *intent = NULL;
	struct glink_core_rx_intent *tmp;
	int iid = 0;
	/* TX_DATA/TX_DATA_CONT wire header */
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;
	int ret;
	unsigned long flags;
	int chunk_size = len;
	int left_size = 0;

	if (!glink->intentless) {
		/* Loop: pick an intent, or request one and wait for it */
		while (!intent) {
			spin_lock_irqsave(&channel->intent_lock, flags);
			/* Best-fit search: smallest unused intent that still
			 * holds @len bytes; exact match stops the scan. */
			idr_for_each_entry(&channel->riids, tmp, iid) {
				if (tmp->size >= len && !tmp->in_use) {
					if (!intent)
						intent = tmp;
					else if (intent->size > tmp->size)
						intent = tmp;
					if (intent->size == len)
						break;
				}
			}
			if (intent)
				intent->in_use = true;
			spin_unlock_irqrestore(&channel->intent_lock, flags);

			/* We found an available intent */
			if (intent)
				break;

			if (atomic_read(&glink->in_reset))
				return -ECONNRESET;

			if (!wait)
				return -EBUSY;

			ret = qcom_glink_request_intent(glink, channel, len);
			if (ret < 0)
				return ret;

			/* Wait for intents to arrive (the granted request is
			 * completed asynchronously by the rx path) */
			ret = wait_event_timeout(channel->intent_req_comp,
				atomic_read(&channel->intent_req_completed) ||
				atomic_read(&glink->in_reset), 10 * HZ);

			if (!ret) {
				dev_err(glink->dev,
					"intent request completion timed out\n");
				ret = -ETIMEDOUT;
			} else if (atomic_read(&glink->in_reset)) {
				CH_INFO(channel, "ssr detected\n");
				ret = -ECONNRESET;
			} else {
				ret = channel->intent_req_result ? 0 : -ECANCELED;
			}

			if (ret < 0)
				return ret;
		}

		iid = intent->id;
	}

	/* Blocking sends are chunked at SZ_8K to avoid hogging the FIFO */
	if (wait && chunk_size > SZ_8K) {
		chunk_size = SZ_8K;
		left_size = len - chunk_size;
	}
	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(iid);
	req.chunk_size = cpu_to_le32(chunk_size);
	req.left_size = cpu_to_le32(left_size);

	CH_INFO(channel, "iid:%d chunk_size:%d left_size:%d\n", iid,
		chunk_size, left_size);

	ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);

	/* Mark intent available if we failed */
	if (ret) {
		if (intent)
			intent->in_use = false;
		return ret;
	}

	/* Send remaining chunks as TX_DATA_CONT with a running left_size */
	while (left_size > 0) {
		data = (void *)((char *)data + chunk_size);
		chunk_size = left_size;
		if (chunk_size > SZ_8K)
			chunk_size = SZ_8K;
		left_size -= chunk_size;

		req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT);
		req.msg.param1 = cpu_to_le16(channel->lcid);
		req.msg.param2 = cpu_to_le32(iid);
		req.chunk_size = cpu_to_le32(chunk_size);
		req.left_size = cpu_to_le32(left_size);

		CH_INFO(channel, "iid:%d chunk_size:%d left_size:%d\n", iid,
			chunk_size, left_size);

		ret = qcom_glink_tx(glink, &req, sizeof(req), data,
				    chunk_size, wait);

		/* Mark intent available if we failed */
		if (ret) {
			if (intent)
				intent->in_use = false;
			break;
		}
	}

	return ret;
}
  1677. static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
  1678. {
  1679. struct glink_channel *channel = to_glink_channel(ept);
  1680. return __qcom_glink_send(channel, data, len, true);
  1681. }
  1682. static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
  1683. {
  1684. struct glink_channel *channel = to_glink_channel(ept);
  1685. return __qcom_glink_send(channel, data, len, false);
  1686. }
  1687. static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
  1688. {
  1689. struct glink_channel *channel = to_glink_channel(ept);
  1690. return __qcom_glink_send(channel, data, len, true);
  1691. }
  1692. static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
  1693. {
  1694. struct glink_channel *channel = to_glink_channel(ept);
  1695. return __qcom_glink_send(channel, data, len, false);
  1696. }
  1697. int qcom_glink_get_signals(struct rpmsg_endpoint *ept)
  1698. {
  1699. struct glink_channel *channel;
  1700. if (!ept)
  1701. return 0;
  1702. channel = to_glink_channel(ept);
  1703. return channel->remote_signals;
  1704. }
  1705. EXPORT_SYMBOL(qcom_glink_get_signals);
  1706. int qcom_glink_set_signals(struct rpmsg_endpoint *ept, u32 set, u32 clear)
  1707. {
  1708. struct glink_channel *channel;
  1709. struct qcom_glink *glink;
  1710. u32 signals;
  1711. if (!ept)
  1712. return -EINVAL;
  1713. channel = to_glink_channel(ept);
  1714. glink = channel->glink;
  1715. signals = channel->local_signals;
  1716. if (set & TIOCM_DTR)
  1717. signals |= TIOCM_DTR;
  1718. if (set & TIOCM_RTS)
  1719. signals |= TIOCM_RTS;
  1720. if (set & TIOCM_CD)
  1721. signals |= TIOCM_CD;
  1722. if (set & TIOCM_RI)
  1723. signals |= TIOCM_RI;
  1724. if (clear & TIOCM_DTR)
  1725. signals &= ~TIOCM_DTR;
  1726. if (clear & TIOCM_RTS)
  1727. signals &= ~TIOCM_RTS;
  1728. if (clear & TIOCM_CD)
  1729. signals &= ~TIOCM_CD;
  1730. if (clear & TIOCM_RI)
  1731. signals &= ~TIOCM_RI;
  1732. channel->local_signals = signals;
  1733. return qcom_glink_send_signals(glink, channel, signals);
  1734. }
  1735. EXPORT_SYMBOL(qcom_glink_set_signals);
  1736. int qcom_glink_register_signals_cb(struct rpmsg_endpoint *ept,
  1737. int (*cb)(struct rpmsg_device *, void *, u32, u32))
  1738. {
  1739. struct glink_channel *channel;
  1740. if (!ept || !cb)
  1741. return -EINVAL;
  1742. channel = to_glink_channel(ept);
  1743. channel->signals_cb = cb;
  1744. return 0;
  1745. }
  1746. EXPORT_SYMBOL(qcom_glink_register_signals_cb);
/*
 * Finds the device_node for the glink child interested in this channel.
 *
 * Scans the available children of @node for one whose
 * "qcom,glink-channels" string property equals @channel.  Returns the
 * matching child (note: with its of_node refcount held by the iterator
 * — the caller effectively owns that reference) or NULL.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;	/* child has no channel binding */

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}
/* rpmsg device-level callbacks for GLINK-backed rpmsg devices */
static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
	.announce_create = qcom_glink_announce_create,
};
/* rpmsg endpoint-level callbacks; sendto/trysendto ignore the dst addr */
static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.sendto = qcom_glink_sendto,
	.trysend = qcom_glink_trysend,
	.trysendto = qcom_glink_trysendto,
};
/*
 * .release for rpmsg devices allocated in qcom_glink_rx_open(): frees
 * the rpdev (and its driver_override string) when the last device
 * reference is dropped.
 */
static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);

	kfree(rpdev->driver_override);
	kfree(rpdev);
}
/*
 * Handle a remote RPM_CMD_OPEN for channel @name with remote id @rcid.
 *
 * If no local channel of that name exists yet the open was initiated by
 * the remote: allocate a channel and later register an rpmsg device for
 * it.  Either way, record @rcid, ack the open, and (for remote-initiated
 * opens) create the rpmsg device.
 *
 * Returns 0 on success or a negative errno; on failure the rcid mapping
 * and (for remote-initiated opens) the channel reference are unwound.
 */
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int cid;
	int ret;
	unsigned long flags;

	/* Look for an existing channel with this name (locally opened) */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	/* Map the remote channel id to this channel */
	spin_lock_irqsave(&glink->idr_lock, flags);
	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		goto free_channel;
	}
	channel->rcid = ret;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	/* Unblock any local opener waiting for the remote's open request */
	complete_all(&channel->open_req);

	/*
	 * Acknowledge the open request to establish the channel
	 * before initializing the rpmsg device.
	 */
	qcom_glink_send_open_ack(glink, channel);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		/* Bind the matching DT child node, if any, so drivers can
		 * match on it */
		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		/* NOTE(review): on registration failure rpdev is assumed to
		 * be released via rpdev->dev.release by the rpmsg core —
		 * confirm against the rpmsg_register_device implementation */
		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto rcid_remove;

		channel->rpdev = rpdev;
	}

	CH_INFO(channel, "\n");
	return 0;

rcid_remove:
	CH_INFO(channel, "rcid_remove\n");
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);
free_channel:
	CH_INFO(channel, "free_channel\n");
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}
/*
 * Handle a remote RPM_CMD_CLOSE for @rcid: tear down the rpmsg device,
 * ack the close, drop the rcid mapping and release the channel ref that
 * the mapping held.
 */
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;
	unsigned long flags;

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;
	CH_INFO(channel, "\n");

	/* cancel pending rx_done work */
	kthread_cancel_work_sync(&channel->intent_work);

	if (channel->rpdev) {
		strscpy_pad(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}
	channel->rpdev = NULL;

	qcom_glink_send_close_ack(glink, channel->rcid);

	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}
/*
 * Handle the remote's RPM_CMD_CLOSE_ACK for our earlier close request on
 * local channel id @lcid: drop the lcid mapping, reset per-open state so
 * the channel can be reopened, and release the mapping's reference.
 */
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;
	unsigned long flags;

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->lcids, lcid);
	if (WARN(!channel, "close ack on unknown channel\n")) {
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		return;
	}
	CH_INFO(channel, "\n");

	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	/* Reinit any variables that are important to endpoint creation */
	reinit_completion(&channel->open_ack);
	channel->channel_ready = false;

	kref_put(&channel->refcount, qcom_glink_channel_release);
}
/*
 * Deferred-command worker: drains glink->rx_queue (commands that the IRQ
 * path could not handle in atomic context) and dispatches each to its
 * handler.  Entries are freed after processing.
 */
static void qcom_glink_work(struct work_struct *work)
{
	struct qcom_glink *glink = container_of(work, struct qcom_glink,
						rx_work);
	struct glink_defer_cmd *dcmd;
	struct glink_msg *msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int cmd;

	for (;;) {
		/* Pop one deferred command under the rx lock */
		spin_lock_irqsave(&glink->rx_lock, flags);
		if (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = &dcmd->msg;
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
			qcom_glink_receive_version(glink, param1, param2);
			break;
		case RPM_CMD_VERSION_ACK:
			qcom_glink_receive_version_ack(glink, param1, param2);
			break;
		case RPM_CMD_OPEN:
			/* msg->data carries the channel name */
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case RPM_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case RPM_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		case RPM_CMD_RX_INTENT_REQ:
			qcom_glink_handle_intent_req(glink, param1, param2);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}
/*
 * Early notification that the remote subsystem is going down: mark the
 * link as in-reset and wake all waiters so blocked senders and intent
 * requesters bail out immediately instead of waiting for their timeouts.
 */
void qcom_glink_early_ssr_notify(void *data)
{
	struct qcom_glink *glink = data;
	struct glink_channel *channel;
	unsigned long flags;
	int cid;

	if (!glink)
		return;

	/* in_reset is read as a boolean by the wait conditions */
	atomic_inc(&glink->in_reset);

	/* To wakeup any blocking writers */
	wake_up_all(&glink->tx_avail_notify);

	/* Wake every channel blocked on an intent request or completion */
	spin_lock_irqsave(&glink->idr_lock, flags);
	idr_for_each_entry(&glink->lcids, channel, cid) {
		wake_up(&channel->intent_req_ack);
		wake_up(&channel->intent_req_comp);
	}
	spin_unlock_irqrestore(&glink->idr_lock, flags);
}
EXPORT_SYMBOL(qcom_glink_early_ssr_notify);
/*
 * Cancel the deferred rx worker and free any commands still queued.
 * Caller must guarantee no new work is being queued concurrently
 * (e.g. the IRQ is disabled during remove).
 */
static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
{
	struct glink_defer_cmd *dcmd;
	struct glink_defer_cmd *tmp;

	/* cancel any pending deferred rx_work */
	cancel_work_sync(&glink->rx_work);

	/* Entries are freed without list_del(); rx_queue must not be
	 * used again afterwards */
	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
		kfree(dcmd);
}
  1983. static ssize_t rpmsg_name_show(struct device *dev,
  1984. struct device_attribute *attr, char *buf)
  1985. {
  1986. int ret = 0;
  1987. const char *name;
  1988. ret = of_property_read_string(dev->of_node, "label", &name);
  1989. if (ret < 0)
  1990. name = dev->of_node->name;
  1991. return sysfs_emit(buf, "%s\n", name);
  1992. }
  1993. static DEVICE_ATTR_RO(rpmsg_name);
/* sysfs attributes attached to the glink device (see rpmsg_name_show) */
static struct attribute *qcom_glink_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_glink);
/*
 * .release for the chrdev rpmsg device: drops the channel reference
 * taken by qcom_glink_alloc_channel() and frees the rpdev.
 */
static void qcom_glink_device_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	kfree(rpdev->driver_override);
	kfree(rpdev);
}
/*
 * Register the "rpmsg_chrdev" control device for this edge, giving
 * userspace a char-device interface to open GLINK channels.
 *
 * NOTE(review): on rpmsg_ctrldev_register_device() failure, cleanup of
 * rpdev/channel is assumed to happen through dev.release in the rpmsg
 * core — confirm against the rpmsg_ctrldev implementation.
 */
static int qcom_glink_create_chrdev(struct qcom_glink *glink)
{
	struct rpmsg_device *rpdev;
	struct glink_channel *channel;

	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return -ENOMEM;

	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
	if (IS_ERR(channel)) {
		kfree(rpdev);
		return PTR_ERR(channel);
	}
	channel->rpdev = rpdev;

	rpdev->ept = &channel->ept;
	rpdev->ops = &glink_device_ops;
	rpdev->dev.parent = glink->dev;
	rpdev->dev.release = qcom_glink_device_release;

	return rpmsg_ctrldev_register_device(rpdev);
}
  2027. static void qcom_glink_set_affinity(struct qcom_glink *glink, u32 *arr,
  2028. size_t size)
  2029. {
  2030. int i;
  2031. cpumask_clear(&glink->cpu_mask);
  2032. for (i = 0; i < size; i++) {
  2033. if (arr[i] < num_possible_cpus())
  2034. cpumask_set_cpu(arr[i], &glink->cpu_mask);
  2035. }
  2036. if (irq_set_affinity_hint(glink->irq, &glink->cpu_mask))
  2037. dev_err(glink->dev, "failed to set irq affinity\n");
  2038. if (set_cpus_allowed_ptr(glink->task, &glink->cpu_mask))
  2039. dev_err(glink->dev, "failed to set task affinity\n");
  2040. }
  2041. struct qcom_glink *qcom_glink_native_probe(struct device *dev,
  2042. unsigned long features,
  2043. struct qcom_glink_pipe *rx,
  2044. struct qcom_glink_pipe *tx,
  2045. bool intentless)
  2046. {
  2047. int ret;
  2048. struct qcom_glink *glink;
  2049. glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
  2050. if (!glink)
  2051. return ERR_PTR(-ENOMEM);
  2052. glink->dev = dev;
  2053. glink->tx_pipe = tx;
  2054. glink->rx_pipe = rx;
  2055. glink->features = features;
  2056. glink->intentless = intentless;
  2057. spin_lock_init(&glink->tx_lock);
  2058. spin_lock_init(&glink->rx_lock);
  2059. INIT_LIST_HEAD(&glink->rx_queue);
  2060. INIT_WORK(&glink->rx_work, qcom_glink_work);
  2061. init_waitqueue_head(&glink->tx_avail_notify);
  2062. spin_lock_init(&glink->idr_lock);
  2063. idr_init(&glink->lcids);
  2064. idr_init(&glink->rcids);
  2065. atomic_set(&glink->in_reset, 0);
  2066. glink->dev->groups = qcom_glink_groups;
  2067. ret = device_add_groups(dev, qcom_glink_groups);
  2068. if (ret)
  2069. dev_err(dev, "failed to add groups\n");
  2070. ret = of_property_read_string(dev->of_node, "label", &glink->name);
  2071. if (ret < 0)
  2072. glink->name = dev->of_node->name;
  2073. glink->mbox_client.dev = dev;
  2074. glink->mbox_client.knows_txdone = true;
  2075. glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
  2076. if (IS_ERR(glink->mbox_chan)) {
  2077. if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
  2078. dev_err(dev, "failed to acquire IPC channel\n");
  2079. return ERR_CAST(glink->mbox_chan);
  2080. }
  2081. kthread_init_worker(&glink->kworker);
  2082. glink->task = kthread_run(kthread_worker_fn, &glink->kworker,
  2083. "glink_%s", glink->name);
  2084. if (IS_ERR(glink->task)) {
  2085. dev_err(dev, "failed to spawn intent kthread %ld\n",
  2086. PTR_ERR(glink->task));
  2087. return ERR_CAST(glink->task);
  2088. }
  2089. scnprintf(glink->irqname, 32, "glink-native-%s", glink->name);
  2090. glink->ilc = ipc_log_context_create(GLINK_LOG_PAGE_CNT, glink->name, 0);
  2091. return glink;
  2092. }
  2093. EXPORT_SYMBOL(qcom_glink_native_probe);
  2094. int qcom_glink_native_start(struct qcom_glink *glink)
  2095. {
  2096. struct device *dev = glink->dev;
  2097. u32 *arr;
  2098. int size;
  2099. int irq;
  2100. int ret;
  2101. spin_lock_init(&glink->irq_lock);
  2102. glink->irq_running = false;
  2103. irq = of_irq_get(dev->of_node, 0);
  2104. ret = devm_request_threaded_irq(dev, irq,
  2105. qcom_glink_native_intr,
  2106. qcom_glink_native_thread_intr,
  2107. IRQF_NO_SUSPEND | IRQF_ONESHOT,
  2108. glink->irqname, glink);
  2109. if (ret) {
  2110. dev_err(dev, "failed to request IRQ with %d\n", ret);
  2111. return ret;
  2112. }
  2113. glink->irq = irq;
  2114. size = of_property_count_u32_elems(dev->of_node, "cpu-affinity");
  2115. if (size > 0) {
  2116. arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
  2117. if (!arr)
  2118. return -ENOMEM;
  2119. ret = of_property_read_u32_array(dev->of_node, "cpu-affinity",
  2120. arr, size);
  2121. if (!ret)
  2122. qcom_glink_set_affinity(glink, arr, size);
  2123. kfree(arr);
  2124. }
  2125. ret = qcom_glink_send_version(glink);
  2126. if (ret) {
  2127. dev_err(glink->dev, "failed to send version: %d\n", ret);
  2128. return ret;
  2129. }
  2130. ret = qcom_glink_create_chrdev(glink);
  2131. if (ret)
  2132. dev_err(glink->dev, "failed to register chrdev\n");
  2133. return 0;
  2134. }
  2135. EXPORT_SYMBOL(qcom_glink_native_start);
/* device_for_each_child() helper: unregister every child rpmsg device */
static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
  2141. void qcom_glink_native_remove(struct qcom_glink *glink)
  2142. {
  2143. struct glink_channel *channel;
  2144. int size;
  2145. int cid;
  2146. int ret;
  2147. qcom_glink_early_ssr_notify(glink);
  2148. disable_irq(glink->irq);
  2149. qcom_glink_cancel_rx_work(glink);
  2150. ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
  2151. if (ret)
  2152. dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
  2153. /* Release any defunct local channels, waiting for close-ack */
  2154. idr_for_each_entry(&glink->lcids, channel, cid) {
  2155. kref_put(&channel->refcount, qcom_glink_channel_release);
  2156. idr_remove(&glink->lcids, cid);
  2157. }
  2158. /* Release any defunct local channels, waiting for close-req */
  2159. idr_for_each_entry(&glink->rcids, channel, cid) {
  2160. kref_put(&channel->refcount, qcom_glink_channel_release);
  2161. idr_remove(&glink->rcids, cid);
  2162. }
  2163. /* Release any defunct local channels, waiting for close-req */
  2164. idr_for_each_entry(&glink->rcids, channel, cid)
  2165. kref_put(&channel->refcount, qcom_glink_channel_release);
  2166. idr_destroy(&glink->lcids);
  2167. idr_destroy(&glink->rcids);
  2168. kthread_flush_worker(&glink->kworker);
  2169. kthread_stop(glink->task);
  2170. /*
  2171. * Required for spss only. A cb is provided for this in spss driver. For
  2172. * others, its done in prepare stage in smem driver. No cb is given.
  2173. */
  2174. qcom_glink_pipe_reset(glink);
  2175. mbox_free_channel(glink->mbox_chan);
  2176. size = of_property_count_u32_elems(glink->dev->of_node, "cpu-affinity");
  2177. if (size > 0 && irq_set_affinity_hint(glink->irq, NULL))
  2178. dev_err(glink->dev, "failed to clear irq affinity\n");
  2179. }
  2180. EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
/* Unregister the glink edge device itself */
void qcom_glink_native_unregister(struct qcom_glink *glink)
{
	device_unregister(glink->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);
/* noirq suspend hook: flag that a glink interrupt arriving from here on
 * is a wakeup event.  should_wake is a file-scope flag defined earlier
 * in this file (outside this excerpt). */
static int qcom_glink_suspend_no_irq(struct device *dev)
{
	should_wake = true;
	return 0;
}
/* noirq resume hook: clear the wakeup flag set during suspend */
static int qcom_glink_resume_no_irq(struct device *dev)
{
	should_wake = false;
	return 0;
}
/* PM ops shared by glink transport drivers (noirq phase only) */
const struct dev_pm_ops glink_native_pm_ops = {
	.suspend_noirq = qcom_glink_suspend_no_irq,
	.resume_noirq = qcom_glink_resume_no_irq,
};
EXPORT_SYMBOL(glink_native_pm_ops);
  2201. MODULE_DESCRIPTION("Qualcomm GLINK driver");
  2202. MODULE_LICENSE("GPL v2");