rsc_mgr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/gunyah.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/auxiliary_bus.h>
#include <linux/gunyah_rsc_mgr.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>

#include <asm/gunyah.h>

#include "rsc_mgr.h"
#include "vm_mgr.h"

#define RM_RPC_API_VERSION_MASK		GENMASK(3, 0)
#define RM_RPC_HEADER_WORDS_MASK	GENMASK(7, 4)
#define RM_RPC_API_VERSION		FIELD_PREP(RM_RPC_API_VERSION_MASK, 1)
#define RM_RPC_HEADER_WORDS		FIELD_PREP(RM_RPC_HEADER_WORDS_MASK, \
						(sizeof(struct gh_rm_rpc_hdr) / sizeof(u32)))
#define RM_RPC_API			(RM_RPC_API_VERSION | RM_RPC_HEADER_WORDS)

#define RM_RPC_TYPE_CONTINUATION	0x0
#define RM_RPC_TYPE_REQUEST		0x1
#define RM_RPC_TYPE_REPLY		0x2
#define RM_RPC_TYPE_NOTIF		0x3
#define RM_RPC_TYPE_MASK		GENMASK(1, 0)

#define GH_RM_MAX_NUM_FRAGMENTS		62
#define RM_RPC_FRAGMENTS_MASK		GENMASK(7, 2)

struct gh_rm_rpc_hdr {
	u8 api;
	u8 type;
	__le16 seq;
	__le32 msg_id;
} __packed;

struct gh_rm_rpc_reply_hdr {
	struct gh_rm_rpc_hdr hdr;
	__le32 err_code; /* GH_RM_ERROR_* */
} __packed;

#define GH_RM_MAX_MSG_SIZE	(GH_MSGQ_MAX_MSG_SIZE - sizeof(struct gh_rm_rpc_hdr))

/* RM Error codes */
enum gh_rm_error {
	GH_RM_ERROR_OK			= 0x0,
	GH_RM_ERROR_UNIMPLEMENTED	= 0xFFFFFFFF,
	GH_RM_ERROR_NOMEM		= 0x1,
	GH_RM_ERROR_NORESOURCE		= 0x2,
	GH_RM_ERROR_DENIED		= 0x3,
	GH_RM_ERROR_INVALID		= 0x4,
	GH_RM_ERROR_BUSY		= 0x5,
	GH_RM_ERROR_ARGUMENT_INVALID	= 0x6,
	GH_RM_ERROR_HANDLE_INVALID	= 0x7,
	GH_RM_ERROR_VALIDATE_FAILED	= 0x8,
	GH_RM_ERROR_MAP_FAILED		= 0x9,
	GH_RM_ERROR_MEM_INVALID		= 0xA,
	GH_RM_ERROR_MEM_INUSE		= 0xB,
	GH_RM_ERROR_MEM_RELEASED	= 0xC,
	GH_RM_ERROR_VMID_INVALID	= 0xD,
	GH_RM_ERROR_LOOKUP_FAILED	= 0xE,
	GH_RM_ERROR_IRQ_INVALID		= 0xF,
	GH_RM_ERROR_IRQ_INUSE		= 0x10,
	GH_RM_ERROR_IRQ_RELEASED	= 0x11,
};
/**
 * struct gh_rm_connection - Represents a complete message from resource manager
 * @payload: Combined payload of all the fragments (msg headers stripped off).
 * @size: Size of the payload received so far.
 * @msg_id: Message ID from the header.
 * @type: RM_RPC_TYPE_REPLY or RM_RPC_TYPE_NOTIF.
 * @num_fragments: total number of fragments expected to be received.
 * @fragments_received: fragments received so far.
 * @reply: Fields used for request/reply sequences
 * @notification: Fields used for notifications
 */
struct gh_rm_connection {
	void *payload;
	size_t size;
	__le32 msg_id;
	u8 type;

	u8 num_fragments;
	u8 fragments_received;

	union {
		/**
		 * @ret: Linux return code, set if there was an error processing the connection
		 * @seq: Sequence ID for the main message.
		 * @rm_error: For request/reply sequences with standard replies
		 * @seq_done: Signals caller that the RM reply has been received
		 */
		struct {
			int ret;
			u16 seq;
			enum gh_rm_error rm_error;
			struct completion seq_done;
		} reply;

		/**
		 * @rm: Pointer to the RM that launched the connection
		 * @work: Triggered when all fragments of a notification received
		 */
		struct {
			struct gh_rm *rm;
			struct work_struct work;
		} notification;
	};
};
/**
 * struct gh_rm - private data for communicating w/Gunyah resource manager
 * @dev: pointer to RM platform device
 * @tx_ghrsc: message queue resource to TX to RM
 * @rx_ghrsc: message queue resource to RX from RM
 * @msgq: mailbox instance of TX/RX resources above
 * @msgq_client: mailbox client of above msgq
 * @active_rx_connection: ongoing gh_rm_connection for which we're receiving fragments
 * @last_tx_ret: return value of last mailbox tx
 * @call_xarray: xarray to allocate & lookup sequence IDs for Request/Response flows
 * @next_seq: next ID to allocate (for xa_alloc_cyclic)
 * @cache: cache for allocating Tx messages
 * @send_lock: synchronization to allow only one request to be sent at a time
 * @nh: notifier chain for clients interested in RM notification messages
 * @adev: auxiliary device registered for the "gh_rm_core" client
 * @miscdev: /dev/gunyah
 * @irq_domain: Domain to translate Gunyah hwirqs to Linux irqs
 */
struct gh_rm {
	struct device *dev;
	struct gh_resource tx_ghrsc;
	struct gh_resource rx_ghrsc;
	struct gh_msgq msgq;
	struct mbox_client msgq_client;
	struct gh_rm_connection *active_rx_connection;
	int last_tx_ret;

	struct xarray call_xarray;
	u32 next_seq;

	struct kmem_cache *cache;
	struct mutex send_lock;
	struct blocking_notifier_head nh;

	struct auxiliary_device adev;
	struct miscdevice miscdev;
	struct irq_domain *irq_domain;
};
/**
 * gh_rm_error_remap() - Remap Gunyah resource manager errors into a Linux error code
 * @rm_error: "Standard" return value from Gunyah resource manager
 */
static inline int gh_rm_error_remap(enum gh_rm_error rm_error)
{
	switch (rm_error) {
	case GH_RM_ERROR_OK:
		return 0;
	case GH_RM_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	case GH_RM_ERROR_NOMEM:
		return -ENOMEM;
	case GH_RM_ERROR_NORESOURCE:
		return -ENODEV;
	case GH_RM_ERROR_DENIED:
		return -EPERM;
	case GH_RM_ERROR_BUSY:
		return -EBUSY;
	case GH_RM_ERROR_INVALID:
	case GH_RM_ERROR_ARGUMENT_INVALID:
	case GH_RM_ERROR_HANDLE_INVALID:
	case GH_RM_ERROR_VALIDATE_FAILED:
	case GH_RM_ERROR_MAP_FAILED:
	case GH_RM_ERROR_MEM_INVALID:
	case GH_RM_ERROR_MEM_INUSE:
	case GH_RM_ERROR_MEM_RELEASED:
	case GH_RM_ERROR_VMID_INVALID:
	case GH_RM_ERROR_LOOKUP_FAILED:
	case GH_RM_ERROR_IRQ_INVALID:
	case GH_RM_ERROR_IRQ_INUSE:
	case GH_RM_ERROR_IRQ_RELEASED:
		return -EINVAL;
	default:
		return -EBADMSG;
	}
}

struct gh_irq_chip_data {
	u32 gh_virq;
};

static struct irq_chip gh_rm_irq_chip = {
	.name = "Gunyah",
	.irq_enable = irq_chip_enable_parent,
	.irq_disable = irq_chip_disable_parent,
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_mask_ack = irq_chip_mask_ack_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_set_type = irq_chip_set_type_parent,
	.irq_set_wake = irq_chip_set_wake_parent,
	.irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_get_irqchip_state = irq_chip_get_parent_state,
	.irq_set_irqchip_state = irq_chip_set_parent_state,
	.flags = IRQCHIP_SET_TYPE_MASKED |
		 IRQCHIP_SKIP_SET_WAKE |
		 IRQCHIP_MASK_ON_SUSPEND,
};

static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
				  void *arg)
{
	struct gh_irq_chip_data *chip_data, *spec = arg;
	struct irq_fwspec parent_fwspec = {};
	struct gh_rm *rm = d->host_data;
	u32 gh_virq = spec->gh_virq;
	int ret;

	if (nr_irqs != 1)
		return -EINVAL;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return -ENOMEM;

	chip_data->gh_virq = gh_virq;

	ret = irq_domain_set_hwirq_and_chip(d, virq, chip_data->gh_virq, &gh_rm_irq_chip,
					    chip_data);
	if (ret)
		goto err_free_irq_data;

	parent_fwspec.fwnode = d->parent->fwnode;
	ret = arch_gh_fill_irq_fwspec_params(chip_data->gh_virq, &parent_fwspec);
	if (ret) {
		dev_err(rm->dev, "virq translation failed %u: %d\n", chip_data->gh_virq, ret);
		goto err_free_irq_data;
	}

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &parent_fwspec);
	if (ret)
		goto err_free_irq_data;

	return ret;
err_free_irq_data:
	kfree(chip_data);
	return ret;
}

static void gh_rm_irq_domain_free_single(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *irq_data;

	irq_data = irq_domain_get_irq_data(d, virq);
	if (!irq_data)
		return;

	kfree(irq_data->chip_data);
	irq_data->chip_data = NULL;
}

static void gh_rm_irq_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	unsigned int i;

	for (i = 0; i < nr_irqs; i++)
		gh_rm_irq_domain_free_single(d, virq);
}

static const struct irq_domain_ops gh_rm_irq_domain_ops = {
	.alloc = gh_rm_irq_domain_alloc,
	.free = gh_rm_irq_domain_free,
};
struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_resource *hyp_resource)
{
	struct gh_resource *ghrsc;
	int ret;

	ghrsc = kzalloc(sizeof(*ghrsc), GFP_KERNEL);
	if (!ghrsc)
		return NULL;

	ghrsc->type = hyp_resource->type;
	ghrsc->capid = le64_to_cpu(hyp_resource->cap_id);
	ghrsc->irq = IRQ_NOTCONNECTED;
	ghrsc->rm_label = le32_to_cpu(hyp_resource->resource_label);
	if (hyp_resource->virq && hyp_resource->virq != GH_RM_RESOURCE_NO_VIRQ) {
		struct gh_irq_chip_data irq_data = {
			.gh_virq = le32_to_cpu(hyp_resource->virq),
		};

		ret = irq_domain_alloc_irqs(rm->irq_domain, 1, NUMA_NO_NODE, &irq_data);
		if (ret < 0) {
			dev_err(rm->dev,
				"Failed to allocate interrupt for resource %d label: %d: %d\n",
				ghrsc->type, ghrsc->rm_label, ret);
			kfree(ghrsc);
			return NULL;
		} else {
			ghrsc->irq = ret;
		}
	}

	return ghrsc;
}

void gh_rm_free_resource(struct gh_resource *ghrsc)
{
	irq_dispose_mapping(ghrsc->irq);
	kfree(ghrsc);
}

static int gh_rm_init_connection_payload(struct gh_rm_connection *connection, void *msg,
					 size_t hdr_size, size_t msg_size)
{
	size_t max_buf_size, payload_size;
	struct gh_rm_rpc_hdr *hdr = msg;

	if (msg_size < hdr_size)
		return -EINVAL;

	payload_size = msg_size - hdr_size;

	connection->num_fragments = FIELD_GET(RM_RPC_FRAGMENTS_MASK, hdr->type);
	connection->fragments_received = 0;

	/* There's not going to be any payload, no need to allocate buffer. */
	if (!payload_size && !connection->num_fragments)
		return 0;

	if (connection->num_fragments > GH_RM_MAX_NUM_FRAGMENTS)
		return -EINVAL;

	max_buf_size = payload_size + (connection->num_fragments * GH_RM_MAX_MSG_SIZE);

	connection->payload = kzalloc(max_buf_size, GFP_KERNEL);
	if (!connection->payload)
		return -ENOMEM;

	memcpy(connection->payload, msg + hdr_size, payload_size);
	connection->size = payload_size;
	return 0;
}

static void gh_rm_abort_connection(struct gh_rm *rm)
{
	switch (rm->active_rx_connection->type) {
	case RM_RPC_TYPE_REPLY:
		rm->active_rx_connection->reply.ret = -EIO;
		complete(&rm->active_rx_connection->reply.seq_done);
		break;
	case RM_RPC_TYPE_NOTIF:
		fallthrough;
	default:
		kfree(rm->active_rx_connection->payload);
		kfree(rm->active_rx_connection);
	}

	rm->active_rx_connection = NULL;
}
static void gh_rm_notif_work(struct work_struct *work)
{
	struct gh_rm_connection *connection = container_of(work, struct gh_rm_connection,
							    notification.work);
	struct gh_rm *rm = connection->notification.rm;

	blocking_notifier_call_chain(&rm->nh, le32_to_cpu(connection->msg_id), connection->payload);

	put_device(rm->dev);
	kfree(connection->payload);
	kfree(connection);
}

static void gh_rm_process_notif(struct gh_rm *rm, void *msg, size_t msg_size)
{
	struct gh_rm_connection *connection;
	struct gh_rm_rpc_hdr *hdr = msg;
	int ret;

	if (rm->active_rx_connection)
		gh_rm_abort_connection(rm);

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection)
		return;

	connection->type = RM_RPC_TYPE_NOTIF;
	connection->msg_id = hdr->msg_id;

	get_device(rm->dev);
	connection->notification.rm = rm;
	INIT_WORK(&connection->notification.work, gh_rm_notif_work);

	ret = gh_rm_init_connection_payload(connection, msg, sizeof(*hdr), msg_size);
	if (ret) {
		dev_err(rm->dev, "Failed to initialize connection for notification: %d\n", ret);
		put_device(rm->dev);
		kfree(connection);
		return;
	}

	rm->active_rx_connection = connection;
}

static void gh_rm_process_reply(struct gh_rm *rm, void *msg, size_t msg_size)
{
	struct gh_rm_rpc_reply_hdr *reply_hdr = msg;
	struct gh_rm_connection *connection;
	u16 seq_id;

	seq_id = le16_to_cpu(reply_hdr->hdr.seq);
	connection = xa_load(&rm->call_xarray, seq_id);

	if (!connection || connection->msg_id != reply_hdr->hdr.msg_id)
		return;

	if (rm->active_rx_connection)
		gh_rm_abort_connection(rm);

	if (gh_rm_init_connection_payload(connection, msg, sizeof(*reply_hdr), msg_size)) {
		dev_err(rm->dev, "Failed to alloc connection buffer for sequence %d\n", seq_id);
		/* Send connection complete and error the client. */
		connection->reply.ret = -ENOMEM;
		complete(&connection->reply.seq_done);
		return;
	}

	connection->reply.rm_error = le32_to_cpu(reply_hdr->err_code);
	rm->active_rx_connection = connection;
}
static void gh_rm_process_cont(struct gh_rm *rm, struct gh_rm_connection *connection,
			       void *msg, size_t msg_size)
{
	struct gh_rm_rpc_hdr *hdr = msg;
	size_t payload_size = msg_size - sizeof(*hdr);

	if (!rm->active_rx_connection)
		return;

	/*
	 * The msg_id and fragment count in hdr preserve the values from the first reply
	 * or notif message. To detect mishandling, check they're still intact.
	 */
	if (connection->msg_id != hdr->msg_id ||
	    connection->num_fragments != FIELD_GET(RM_RPC_FRAGMENTS_MASK, hdr->type)) {
		gh_rm_abort_connection(rm);
		return;
	}

	memcpy(connection->payload + connection->size, msg + sizeof(*hdr), payload_size);
	connection->size += payload_size;
	connection->fragments_received++;
}
static void gh_rm_try_complete_connection(struct gh_rm *rm)
{
	struct gh_rm_connection *connection = rm->active_rx_connection;

	if (!connection || connection->fragments_received != connection->num_fragments)
		return;

	switch (connection->type) {
	case RM_RPC_TYPE_REPLY:
		complete(&connection->reply.seq_done);
		break;
	case RM_RPC_TYPE_NOTIF:
		schedule_work(&connection->notification.work);
		break;
	default:
		dev_err_ratelimited(rm->dev, "Invalid message type (%u) received\n",
				    connection->type);
		gh_rm_abort_connection(rm);
		break;
	}

	rm->active_rx_connection = NULL;
}

static void gh_rm_msgq_rx_data(struct mbox_client *cl, void *mssg)
{
	struct gh_rm *rm = container_of(cl, struct gh_rm, msgq_client);
	struct gh_msgq_rx_data *rx_data = mssg;
	size_t msg_size = rx_data->length;
	void *msg = rx_data->data;
	struct gh_rm_rpc_hdr *hdr;

	if (msg_size < sizeof(*hdr) || msg_size > GH_MSGQ_MAX_MSG_SIZE)
		return;

	hdr = msg;
	if (hdr->api != RM_RPC_API) {
		dev_err(rm->dev, "Unknown RM RPC API version: %x\n", hdr->api);
		return;
	}

	switch (FIELD_GET(RM_RPC_TYPE_MASK, hdr->type)) {
	case RM_RPC_TYPE_NOTIF:
		gh_rm_process_notif(rm, msg, msg_size);
		break;
	case RM_RPC_TYPE_REPLY:
		gh_rm_process_reply(rm, msg, msg_size);
		break;
	case RM_RPC_TYPE_CONTINUATION:
		gh_rm_process_cont(rm, rm->active_rx_connection, msg, msg_size);
		break;
	default:
		dev_err(rm->dev, "Invalid message type (%lu) received\n",
			FIELD_GET(RM_RPC_TYPE_MASK, hdr->type));
		return;
	}

	gh_rm_try_complete_connection(rm);
}

static void gh_rm_msgq_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	struct gh_rm *rm = container_of(cl, struct gh_rm, msgq_client);

	kmem_cache_free(rm->cache, mssg);
	rm->last_tx_ret = r;
}
static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
			      const void *req_buf, size_t req_buf_size,
			      struct gh_rm_connection *connection)
{
	size_t buf_size_remaining = req_buf_size;
	const void *req_buf_curr = req_buf;
	struct gh_msgq_tx_data *msg;
	struct gh_rm_rpc_hdr *hdr, hdr_template;
	u32 cont_fragments = 0;
	size_t payload_size;
	void *payload;
	int ret;

	if (req_buf_size > GH_RM_MAX_NUM_FRAGMENTS * GH_RM_MAX_MSG_SIZE) {
		dev_warn(rm->dev, "Limit (%lu bytes) exceeded for the maximum message size: %lu\n",
			 GH_RM_MAX_NUM_FRAGMENTS * GH_RM_MAX_MSG_SIZE, req_buf_size);
		dump_stack();
		return -E2BIG;
	}

	if (req_buf_size)
		cont_fragments = (req_buf_size - 1) / GH_RM_MAX_MSG_SIZE;

	hdr_template.api = RM_RPC_API;
	hdr_template.type = FIELD_PREP(RM_RPC_TYPE_MASK, RM_RPC_TYPE_REQUEST) |
			    FIELD_PREP(RM_RPC_FRAGMENTS_MASK, cont_fragments);
	hdr_template.seq = cpu_to_le16(connection->reply.seq);
	hdr_template.msg_id = cpu_to_le32(message_id);

	mutex_lock(&rm->send_lock);

	do {
		msg = kmem_cache_zalloc(rm->cache, GFP_KERNEL);
		if (!msg) {
			ret = -ENOMEM;
			goto out;
		}

		/* Fill header */
		hdr = (struct gh_rm_rpc_hdr *)&msg->data[0];
		*hdr = hdr_template;

		/* Copy payload */
		payload = &msg->data[0] + sizeof(*hdr);
		payload_size = min(buf_size_remaining, GH_RM_MAX_MSG_SIZE);

		memcpy(payload, req_buf_curr, payload_size);

		req_buf_curr += payload_size;
		buf_size_remaining -= payload_size;

		/* Force the last fragment to immediately alert the receiver */
		msg->push = !buf_size_remaining;
		msg->length = sizeof(*hdr) + payload_size;

		ret = mbox_send_message(gh_msgq_chan(&rm->msgq), msg);
		if (ret < 0) {
			kmem_cache_free(rm->cache, msg);
			break;
		}

		if (rm->last_tx_ret) {
			ret = rm->last_tx_ret;
			break;
		}

		hdr_template.type = FIELD_PREP(RM_RPC_TYPE_MASK, RM_RPC_TYPE_CONTINUATION) |
				    FIELD_PREP(RM_RPC_FRAGMENTS_MASK, cont_fragments);
	} while (buf_size_remaining);

out:
	mutex_unlock(&rm->send_lock);
	return ret < 0 ? ret : 0;
}
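
/*
 * Illustrative note on the fragmentation above (derived from this file, not from
 * the RM protocol definition): a request of req_buf_size bytes is transmitted as
 * 1 + cont_fragments messages, where cont_fragments = (req_buf_size - 1) / GH_RM_MAX_MSG_SIZE.
 * The first message carries RM_RPC_TYPE_REQUEST and each subsequent one carries
 * RM_RPC_TYPE_CONTINUATION; all of them repeat the same seq, msg_id, and fragment count
 * so the receiver can reassemble the payload, and only the final message sets msg->push
 * to alert the receiver immediately. For example, a payload of 3 * GH_RM_MAX_MSG_SIZE + 1
 * bytes yields cont_fragments = 3, i.e. four messages in total.
 */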
/**
 * gh_rm_call() - Achieve request-response type communication with RPC
 * @_rm: Pointer to Gunyah resource manager internal data
 * @message_id: The RM RPC message-id
 * @req_buf: Request buffer that contains the payload
 * @req_buf_size: Total size of the payload
 * @resp_buf: Pointer to a response buffer
 * @resp_buf_size: Size of the response buffer
 *
 * Make a request to the Resource Manager and wait for the reply. On a successful
 * response, the function returns the payload. The size of the payload is set in
 * resp_buf_size. The resp_buf must be freed by the caller when 0 is returned
 * and resp_buf_size != 0.
 *
 * req_buf must not be NULL when req_buf_size > 0. If req_buf_size == 0,
 * req_buf *can* be NULL and no additional payload is sent.
 *
 * Context: Process context. Will sleep waiting for reply.
 * Return: 0 on success. <0 if error.
 */
int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_size,
	       void **resp_buf, size_t *resp_buf_size)
{
	struct gh_rm *rm = _rm;
	struct gh_rm_connection *connection;
	u32 seq_id;
	int ret;

	/* message_id 0 is reserved. req_buf_size implies req_buf is not NULL */
	if (!rm || !message_id || (!req_buf && req_buf_size))
		return -EINVAL;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection)
		return -ENOMEM;

	connection->type = RM_RPC_TYPE_REPLY;
	connection->msg_id = cpu_to_le32(message_id);

	init_completion(&connection->reply.seq_done);

	/* Allocate a new seq number for this connection */
	ret = xa_alloc_cyclic(&rm->call_xarray, &seq_id, connection, xa_limit_16b, &rm->next_seq,
			      GFP_KERNEL);
	if (ret < 0)
		goto free;
	connection->reply.seq = lower_16_bits(seq_id);

	/* Send the request to the Resource Manager */
	ret = gh_rm_send_request(rm, message_id, req_buf, req_buf_size, connection);
	if (ret < 0)
		goto out;

	/*
	 * Wait for response. Uninterruptible because rollback based on what RM did to VM
	 * requires us to know how RM handled the call.
	 */
	wait_for_completion(&connection->reply.seq_done);

	/* Check for internal (kernel) error waiting for the response */
	if (connection->reply.ret) {
		ret = connection->reply.ret;
		if (ret != -ENOMEM)
			kfree(connection->payload);
		goto out;
	}

	/* Got a response, did resource manager give us an error? */
	if (connection->reply.rm_error != GH_RM_ERROR_OK) {
		dev_warn(rm->dev, "RM rejected message %08x. Error: %d\n", message_id,
			 connection->reply.rm_error);
		ret = gh_rm_error_remap(connection->reply.rm_error);
		kfree(connection->payload);
		goto out;
	}

	/* Everything looks good, return the payload */
	if (resp_buf_size)
		*resp_buf_size = connection->size;
	if (connection->size && resp_buf) {
		*resp_buf = connection->payload;
	} else {
		/*
		 * kfree in case RM sent us multiple fragments but never any data in
		 * those fragments. We would've allocated memory for it, but connection->size == 0
		 */
		kfree(connection->payload);
	}

out:
	xa_erase(&rm->call_xarray, connection->reply.seq);
free:
	kfree(connection);
	return ret;
}
EXPORT_SYMBOL_GPL(gh_rm_call);
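
/*
 * Minimal usage sketch for gh_rm_call(); the message ID, request struct, and field
 * names below are hypothetical placeholders, not part of this driver:
 *
 *	struct example_req req = { .vmid = cpu_to_le16(vmid) };
 *	void *resp = NULL;
 *	size_t resp_size = 0;
 *	int ret;
 *
 *	ret = gh_rm_call(rm, EXAMPLE_MSG_ID, &req, sizeof(req), &resp, &resp_size);
 *	if (ret)
 *		return ret;
 *	... parse resp ...
 *	if (resp_size)
 *		kfree(resp);
 *
 * The kfree() follows the ownership rule documented above gh_rm_call(): on success,
 * any non-empty reply payload is allocated here and handed off to the caller.
 */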
int gh_rm_notifier_register(void *_rm, struct notifier_block *nb)
{
	struct gh_rm *rm = _rm;

	return blocking_notifier_chain_register(&rm->nh, nb);
}
EXPORT_SYMBOL_GPL(gh_rm_notifier_register);

int gh_rm_notifier_unregister(void *_rm, struct notifier_block *nb)
{
	struct gh_rm *rm = _rm;

	return blocking_notifier_chain_unregister(&rm->nh, nb);
}
EXPORT_SYMBOL_GPL(gh_rm_notifier_unregister);

struct device *gh_rm_get(struct gh_rm *rm)
{
	return get_device(rm->miscdev.this_device);
}
EXPORT_SYMBOL_GPL(gh_rm_get);

void gh_rm_put(struct gh_rm *rm)
{
	put_device(rm->miscdev.this_device);
}
EXPORT_SYMBOL_GPL(gh_rm_put);

static long gh_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct miscdevice *miscdev = filp->private_data;
	struct gh_rm *rm = container_of(miscdev, struct gh_rm, miscdev);

	return gh_dev_vm_mgr_ioctl(rm, cmd, arg);
}

static const struct file_operations gh_dev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = gh_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};

static void gh_adev_release(struct device *dev)
{
	/* no-op */
}

static int gh_adev_init(struct gh_rm *rm, const char *name)
{
	struct auxiliary_device *adev = &rm->adev;
	int ret = 0;

	adev->name = name;
	adev->dev.parent = rm->dev;
	adev->dev.release = gh_adev_release;

	ret = auxiliary_device_init(adev);
	if (ret)
		return ret;

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	return ret;
}
static int gh_msgq_platform_probe_direction(struct platform_device *pdev, bool tx,
					    struct gh_resource *ghrsc)
{
	struct device_node *node = pdev->dev.of_node;
	int ret;
	int idx = tx ? 0 : 1;

	ghrsc->type = tx ? GH_RESOURCE_TYPE_MSGQ_TX : GH_RESOURCE_TYPE_MSGQ_RX;

	ghrsc->irq = platform_get_irq(pdev, idx);
	if (ghrsc->irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq%d: %d\n", idx, ghrsc->irq);
		return ghrsc->irq;
	}

	ret = of_property_read_u64_index(node, "reg", idx, &ghrsc->capid);
	if (ret) {
		dev_err(&pdev->dev, "Failed to get capid%d: %d\n", idx, ret);
		return ret;
	}

	return 0;
}

static int gh_identify(void)
{
	struct gh_hypercall_hyp_identify_resp gh_api;

	if (!arch_is_gh_guest())
		return -ENODEV;

	gh_hypercall_hyp_identify(&gh_api);

	pr_info("Running under Gunyah hypervisor %llx/v%u\n",
		FIELD_GET(GH_API_INFO_VARIANT_MASK, gh_api.api_info),
		gh_api_version(&gh_api));

	/* We might move this out to individual drivers if there's ever an API version bump */
	if (gh_api_version(&gh_api) != GH_API_V1) {
		pr_info("Unsupported Gunyah version: %u\n", gh_api_version(&gh_api));
		return -ENODEV;
	}

	return 0;
}
static int gh_rm_drv_probe(struct platform_device *pdev)
{
	struct irq_domain *parent_irq_domain;
	struct device_node *parent_irq_node;
	struct gh_msgq_tx_data *msg;
	struct gh_rm *rm;
	int ret;

	ret = gh_identify();
	if (ret)
		return ret;

	rm = devm_kzalloc(&pdev->dev, sizeof(*rm), GFP_KERNEL);
	if (!rm)
		return -ENOMEM;

	platform_set_drvdata(pdev, rm);
	rm->dev = &pdev->dev;

	mutex_init(&rm->send_lock);
	BLOCKING_INIT_NOTIFIER_HEAD(&rm->nh);
	xa_init_flags(&rm->call_xarray, XA_FLAGS_ALLOC);
	rm->cache = kmem_cache_create("gh_rm", struct_size(msg, data, GH_MSGQ_MAX_MSG_SIZE), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!rm->cache)
		return -ENOMEM;

	ret = gh_msgq_platform_probe_direction(pdev, true, &rm->tx_ghrsc);
	if (ret)
		goto err_cache;

	ret = gh_msgq_platform_probe_direction(pdev, false, &rm->rx_ghrsc);
	if (ret)
		goto err_cache;

	rm->msgq_client.dev = &pdev->dev;
	rm->msgq_client.tx_block = true;
	rm->msgq_client.rx_callback = gh_rm_msgq_rx_data;
	rm->msgq_client.tx_done = gh_rm_msgq_tx_done;

	ret = gh_msgq_init(&pdev->dev, &rm->msgq, &rm->msgq_client, &rm->tx_ghrsc, &rm->rx_ghrsc);
	if (ret)
		goto err_cache;

	parent_irq_node = of_irq_find_parent(pdev->dev.of_node);
	if (!parent_irq_node) {
		dev_err(&pdev->dev, "Failed to find interrupt parent of resource manager\n");
		ret = -ENODEV;
		goto err_msgq;
	}

	parent_irq_domain = irq_find_host(parent_irq_node);
	if (!parent_irq_domain) {
		dev_err(&pdev->dev, "Failed to find interrupt parent domain of resource manager\n");
		ret = -ENODEV;
		goto err_msgq;
	}

	rm->irq_domain = irq_domain_add_hierarchy(parent_irq_domain, 0, 0, pdev->dev.of_node,
						  &gh_rm_irq_domain_ops, NULL);
	if (!rm->irq_domain) {
		dev_err(&pdev->dev, "Failed to add irq domain\n");
		ret = -ENODEV;
		goto err_msgq;
	}
	rm->irq_domain->host_data = rm;

	rm->miscdev.parent = &pdev->dev;
	rm->miscdev.name = "gunyah";
	rm->miscdev.minor = MISC_DYNAMIC_MINOR;
	rm->miscdev.fops = &gh_dev_fops;

	ret = misc_register(&rm->miscdev);
	if (ret)
		goto err_irq_domain;

	ret = gh_adev_init(rm, "gh_rm_core");
	if (ret) {
		dev_err(&pdev->dev, "Failed to add gh_rm_core device\n");
		goto err_misc_device;
	}

	return 0;
err_misc_device:
	misc_deregister(&rm->miscdev);
err_irq_domain:
	irq_domain_remove(rm->irq_domain);
err_msgq:
	gh_msgq_remove(&rm->msgq);
err_cache:
	kmem_cache_destroy(rm->cache);
	return ret;
}

static int gh_rm_drv_remove(struct platform_device *pdev)
{
	struct gh_rm *rm = platform_get_drvdata(pdev);

	auxiliary_device_delete(&rm->adev);
	auxiliary_device_uninit(&rm->adev);
	misc_deregister(&rm->miscdev);
	irq_domain_remove(rm->irq_domain);
	gh_msgq_remove(&rm->msgq);
	kmem_cache_destroy(rm->cache);

	return 0;
}
static const struct of_device_id gh_rm_of_match[] = {
	{ .compatible = "gunyah-resource-manager" },
	{}
};
MODULE_DEVICE_TABLE(of, gh_rm_of_match);

static struct platform_driver gh_rm_driver = {
	.probe = gh_rm_drv_probe,
	.remove = gh_rm_drv_remove,
	.driver = {
		.name = "gh_rsc_mgr",
		.of_match_table = gh_rm_of_match,
	},
};
module_platform_driver(gh_rm_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Gunyah Resource Manager Driver");