// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
#include <linux/ipc_logging.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux GPIO and interrupt framework to expose a virtual
 * GPIO for each outbound entry and a virtual interrupt controller for each
 * inbound entry.
 */
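
/*
 * Illustrative devicetree fragment for one SMP2P edge, matching the
 * properties parsed in qcom_smp2p_probe() below. The SMEM item numbers,
 * interrupt specifier and mailbox phandle are placeholders; consult the
 * qcom,smp2p devicetree binding for the authoritative format.
 *
 *	smp2p-modem {
 *		compatible = "qcom,smp2p";
 *		qcom,smem = <435>, <428>;
 *		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
 *		mboxes = <&apcs 14>;
 *		qcom,local-pid = <0>;
 *		qcom,remote-pid = <1>;
 *
 *		modem_smp2p_out: master-kernel {
 *			qcom,entry-name = "master-kernel";
 *			#qcom,smem-state-cells = <1>;
 *		};
 *
 *		modem_smp2p_in: slave-kernel {
 *			qcom,entry-name = "slave-kernel";
 *			interrupt-controller;
 *			#interrupt-cells = <2>;
 *		};
 *	};
 */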

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		features flag - currently unused
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		restart done/ack state flags
 * @entries:		individual communication entries
 *     @name:		name of the entry
 *     @value:		content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;
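
/*
 * Size sketch for the packed item above (given SMP2P_MAX_ENTRY = 16 and
 * SMP2P_MAX_ENTRY_NAME = 16): the header is 4 + 4 + 4 + 4 + 4 = 20 bytes and
 * each entry is 16 + 4 = 20 bytes, so sizeof(struct smp2p_smem_item) works
 * out to 20 + 16 * 20 = 340 bytes, which is what
 * qcom_smp2p_alloc_outbound_item() requests from SMEM.
 */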

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_pending: bitmap to mark irq bits pending a retrigger
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;

	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_pending, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
 * @ssr_ack:	current cached state of the local ack bit
 * @negotiation_done: whether negotiation has finished
 * @irq_devname: pointer to the smp2p irq devname
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @irq:	interrupt line for inbound notifications from the remote side
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	char *irq_devname;

	unsigned local_pid;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

static void *ilc;
#define SMP2P_LOG_PAGE_CNT 2
#define SMP2P_INFO(x, ...) \
	ipc_log_string(ilc, "[%s]: "x, __func__, ##__VA_ARGS__)

static bool smp2p_suspend_in_progress;

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in = smp2p->in;
	bool restart;

	if (!smp2p->ssr_ack_enabled)
		return false;

	restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);

	return restart != smp2p->ssr_ack;
}

static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	u32 val;

	SMP2P_INFO("%d: SSR detected, doing SSR Handshake\n", smp2p->remote_pid);

	smp2p->ssr_ack = !smp2p->ssr_ack;

	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	if (smp2p->ssr_ack)
		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	out->flags = val;

	qcom_smp2p_kick(smp2p);
}

static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
		SMP2P_INFO("%d: state=open ssr_ack=%d\n", smp2p->remote_pid,
			   smp2p->ssr_ack_enabled);
	}
}

static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	unsigned long status;
	int irq_pin;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	SMP2P_INFO("%d: smp2p_num:%d in_num:%d\n",
		   smp2p->remote_pid, smp2p->valid_entries, in->valid_entries);

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value) {
			SMP2P_INFO("%d:\t%s: skipping not ready\n",
				   smp2p->remote_pid, entry->name);
			continue;
		}

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* Ensure irq_pending is read correctly */
		mb();
		status |= *entry->irq_pending;

		SMP2P_INFO("%d:\t%s: status:%0lx val:%0x\n",
			   smp2p->remote_pid, entry->name, status, val);

		/* No changes of this entry? */
		if (!status)
			continue;

		if (smp2p_suspend_in_progress) {
			pr_info("SMP2P [name:%s] remote: entry:%s status:%0lx\n",
				smp2p->irq_devname,
				entry->name, status);
			smp2p_suspend_in_progress = false;
		}

		for_each_set_bit(i, &status, 32) {
			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
				clear_bit(i, entry->irq_pending);
			}
		}
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static int smp2p_retrigger_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	SMP2P_INFO("%d: %s: %lu\n", entry->smp2p->remote_pid, entry->name, irq);
	set_bit(irq, entry->irq_pending);

	/*
	 * Ensure irq_pending is visible to any CPU that the retriggered
	 * interrupt thread may run on.
	 */
	mb();

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type   = smp2p_set_irq_type,
	.irq_retrigger  = smp2p_retrigger_irq,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);
	irq_set_parent(irq, entry->smp2p->irq);
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
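
/*
 * Consumer-side sketch (not part of this driver): a client wired to one bit
 * of an inbound entry via interrupts-extended can request the virtual
 * interrupt like any other nested/threaded interrupt. The "stop-ack" name,
 * the handler and the priv pointer are hypothetical placeholders.
 *
 *	irq = of_irq_get_byname(pdev->dev.of_node, "stop-ack");
 *	if (irq < 0)
 *		return irq;
 *
 *	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 *					stop_ack_interrupt,
 *					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
 *					"stop-ack", priv);
 */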

static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	SMP2P_INFO("%d: %s: orig:0x%0x new:0x%0x\n",
		   entry->smp2p->remote_pid, entry->name, orig, val);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};
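
/*
 * Consumer-side sketch (not part of this driver): an outbound entry is driven
 * through the smem-state API backed by smp2p_state_ops above. The "stop"
 * con_id and the surrounding driver context are hypothetical placeholders.
 *
 *	struct qcom_smem_state *state;
 *	unsigned int stop_bit;
 *
 *	state = devm_qcom_smem_state_get(&pdev->dev, "stop", &stop_bit);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *
 *	// Assert the bit; this lands in smp2p_update_bits() and kicks the
 *	// remote side.
 *	qcom_smem_state_update_bits(state, BIT(stop_bit), BIT(stop_bit));
 */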

static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;
	out->features = SMP2P_ALL_FEATURES;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct smp2p_entry *next_entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	if (!ilc)
		ilc = ipc_log_context_create(SMP2P_LOG_PAGE_CNT, "smp2p", 0);

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			of_node_put(node);
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0) {
			of_node_put(node);
			goto unwind_interfaces;
		}

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0) {
				of_node_put(node);
				goto unwind_interfaces;
			}

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	smp2p->irq = irq;
	smp2p->irq_devname = kasprintf(GFP_KERNEL, "%s", pdev->dev.of_node->name);
	if (!smp2p->irq_devname) {
		ret = -ENOMEM;
		goto unwind_interfaces;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					smp2p->irq_devname, (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	/*
	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
	 * by default. User space can decide to enable it depending on its
	 * use cases. For example, if the remoteproc crashes and the device
	 * wants to handle it immediately (e.g. to not miss phone calls), it
	 * can enable the wakeup source from user space, while other devices
	 * which do not have a proper autosleep feature may want to handle it
	 * along with other wakeup events (e.g. the power button) instead of
	 * waking up immediately.
	 */
	device_set_wakeup_capable(&pdev->dev, true);

	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		goto set_wake_irq_fail;

	return 0;

set_wake_irq_fail:
	dev_pm_clear_wake_irq(&pdev->dev);

unwind_interfaces:
	list_for_each_entry_safe(entry, next_entry, &smp2p->inbound, node) {
		irq_domain_remove(entry->domain);
		kfree(entry);
	}

	list_for_each_entry_safe(entry, next_entry, &smp2p->outbound, node) {
		qcom_smem_state_unregister(entry->state);
		kfree(entry);
	}

	kfree(smp2p->irq_devname);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;
	struct smp2p_entry *next_entry;

	dev_pm_clear_wake_irq(&pdev->dev);

	list_for_each_entry_safe(entry, next_entry, &smp2p->inbound, node) {
		irq_domain_remove(entry->domain);
		kfree(entry);
	}

	list_for_each_entry_safe(entry, next_entry, &smp2p->outbound, node) {
		qcom_smem_state_unregister(entry->state);
		kfree(entry);
	}

	mbox_free_channel(smp2p->mbox_chan);
	kfree(smp2p->irq_devname);

	smp2p->out->valid_entries = 0;

	return 0;
}

static int qcom_smp2p_restore(struct device *dev)
{
	int ret = 0;
	struct qcom_smp2p *smp2p = dev_get_drvdata(dev);
	struct smp2p_entry *entry;
	struct device_node *node;
	struct platform_device *pdev = container_of(dev, struct platform_device, dev);

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto print_err;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto print_err;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name",
					      &entry->name);
		if (ret < 0)
			goto rel_entry;

		if (!of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto rel_entry;

			list_add(&entry->node, &smp2p->outbound);
		} else {
			kfree(entry);
		}
	}

	enable_irq_wake(smp2p->irq);

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	return ret;

rel_entry:
	kfree(entry);
print_err:
	if (ret < 0 && ret != -EEXIST)
		dev_err(dev, "failed to alloc items ret = %d\n", ret);

	return ret;
}

static int qcom_smp2p_freeze(struct device *dev)
{
	struct qcom_smp2p *smp2p = dev_get_drvdata(dev);
	struct smp2p_entry *entry;
	struct smp2p_entry *next_entry;

	disable_irq_wake(smp2p->irq);

	/* Walk the outbound list and release each state handle and entry */
	list_for_each_entry_safe(entry, next_entry, &smp2p->outbound, node) {
		qcom_smem_state_unregister(entry->state);
		list_del(&entry->node);
		kfree(entry);
	}
	INIT_LIST_HEAD(&smp2p->outbound);

	/* Walk the inbound list and reset the last handled value */
	list_for_each_entry_safe(entry, next_entry, &smp2p->inbound, node) {
		entry->last_value = 0;
	}

	/*
	 * Clear the cached pointer so the first interrupt after restore
	 * re-acquires the remote smem item.
	 */
	smp2p->in = NULL;
	smp2p->valid_entries = 0;

	return 0;
}

static int qcom_smp2p_suspend_no_irq(struct device *dev)
{
	smp2p_suspend_in_progress = true;

	return 0;
}

static int qcom_smp2p_resume(struct device *dev)
{
	smp2p_suspend_in_progress = false;

	return 0;
}

static const struct dev_pm_ops qcom_smp2p_pm_ops = {
	.suspend_noirq = qcom_smp2p_suspend_no_irq,
	.resume = qcom_smp2p_resume,
	.freeze = qcom_smp2p_freeze,
	.restore = qcom_smp2p_restore,
	.thaw = qcom_smp2p_restore,
};

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver = {
		.name = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
		.pm = &qcom_smp2p_pm_ops,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");