bpmp.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
  4. */
  5. #include <linux/clk/tegra.h>
  6. #include <linux/genalloc.h>
  7. #include <linux/mailbox_client.h>
  8. #include <linux/module.h>
  9. #include <linux/of.h>
  10. #include <linux/of_address.h>
  11. #include <linux/of_device.h>
  12. #include <linux/platform_device.h>
  13. #include <linux/pm.h>
  14. #include <linux/semaphore.h>
  15. #include <linux/sched/clock.h>
  16. #include <soc/tegra/bpmp.h>
  17. #include <soc/tegra/bpmp-abi.h>
  18. #include <soc/tegra/ivc.h>
  19. #include "bpmp-private.h"
/* Bits in the message flags field exchanged with the BPMP firmware. */
#define MSG_ACK		BIT(0)	/* sender expects a response to be posted */
#define MSG_RING	BIT(1)	/* ring the doorbell after posting */
/* Size, in bytes, of the firmware tag buffer. */
#define TAG_SZ		32
  23. static inline struct tegra_bpmp *
  24. mbox_client_to_bpmp(struct mbox_client *client)
  25. {
  26. return container_of(client, struct tegra_bpmp, mbox.client);
  27. }
  28. static inline const struct tegra_bpmp_ops *
  29. channel_to_ops(struct tegra_bpmp_channel *channel)
  30. {
  31. struct tegra_bpmp *bpmp = channel->bpmp;
  32. return bpmp->soc->ops;
  33. }
  34. struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
  35. {
  36. struct platform_device *pdev;
  37. struct tegra_bpmp *bpmp;
  38. struct device_node *np;
  39. np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
  40. if (!np)
  41. return ERR_PTR(-ENOENT);
  42. pdev = of_find_device_by_node(np);
  43. if (!pdev) {
  44. bpmp = ERR_PTR(-ENODEV);
  45. goto put;
  46. }
  47. bpmp = platform_get_drvdata(pdev);
  48. if (!bpmp) {
  49. bpmp = ERR_PTR(-EPROBE_DEFER);
  50. put_device(&pdev->dev);
  51. goto put;
  52. }
  53. put:
  54. of_node_put(np);
  55. return bpmp;
  56. }
  57. EXPORT_SYMBOL_GPL(tegra_bpmp_get);
  58. void tegra_bpmp_put(struct tegra_bpmp *bpmp)
  59. {
  60. if (bpmp)
  61. put_device(bpmp->dev);
  62. }
  63. EXPORT_SYMBOL_GPL(tegra_bpmp_put);
  64. static int
  65. tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
  66. {
  67. struct tegra_bpmp *bpmp = channel->bpmp;
  68. unsigned int count;
  69. int index;
  70. count = bpmp->soc->channels.thread.count;
  71. index = channel - channel->bpmp->threaded_channels;
  72. if (index < 0 || index >= count)
  73. return -EINVAL;
  74. return index;
  75. }
  76. static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
  77. {
  78. return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
  79. (msg->rx.size <= MSG_DATA_MIN_SZ) &&
  80. (msg->tx.size == 0 || msg->tx.data) &&
  81. (msg->rx.size == 0 || msg->rx.data);
  82. }
  83. static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
  84. {
  85. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  86. return ops->is_response_ready(channel);
  87. }
  88. static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
  89. {
  90. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  91. return ops->is_request_ready(channel);
  92. }
  93. static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
  94. {
  95. unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
  96. ktime_t end;
  97. end = ktime_add_us(ktime_get(), timeout);
  98. do {
  99. if (tegra_bpmp_is_response_ready(channel))
  100. return 0;
  101. } while (ktime_before(ktime_get(), end));
  102. return -ETIMEDOUT;
  103. }
  104. static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
  105. {
  106. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  107. return ops->ack_response(channel);
  108. }
  109. static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
  110. {
  111. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  112. return ops->ack_request(channel);
  113. }
  114. static bool
  115. tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
  116. {
  117. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  118. return ops->is_request_channel_free(channel);
  119. }
  120. static bool
  121. tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
  122. {
  123. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  124. return ops->is_response_channel_free(channel);
  125. }
  126. static int
  127. tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
  128. {
  129. unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
  130. ktime_t start, now;
  131. start = ns_to_ktime(local_clock());
  132. do {
  133. if (tegra_bpmp_is_request_channel_free(channel))
  134. return 0;
  135. now = ns_to_ktime(local_clock());
  136. } while (ktime_us_delta(now, start) < timeout);
  137. return -ETIMEDOUT;
  138. }
  139. static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
  140. {
  141. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  142. return ops->post_request(channel);
  143. }
  144. static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
  145. {
  146. const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
  147. return ops->post_response(channel);
  148. }
  149. static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
  150. {
  151. return bpmp->soc->ops->ring_doorbell(bpmp);
  152. }
/*
 * Copy a response out of the inbound frame of @channel, acknowledge it,
 * and report the firmware result code through @ret.
 *
 * The inbound frame lives in shared IPC memory, hence memcpy_fromio().
 * NOTE(review): the result code is read from the frame *after* the
 * response has been acked — presumably the backend keeps the frame stable
 * until the next exchange; verify against the IVC/mailbox protocol.
 */
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		memcpy_fromio(data, channel->ib->data, size);

	err = tegra_bpmp_ack_response(channel);
	if (err < 0)
		return err;

	*ret = channel->ib->code;

	return 0;
}
/*
 * Read the response for a threaded message and release the channel slot.
 *
 * The slot was claimed in tegra_bpmp_write_threaded(): its "allocated" bit
 * is cleared here under bpmp->lock, and the threaded counting semaphore
 * (taken with down_timeout() on the write side) is released on every path,
 * including the invalid-index error path.
 */
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	/* slot becomes available for the next threaded message */
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);
	return err;
}
  185. static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
  186. unsigned int mrq, unsigned long flags,
  187. const void *data, size_t size)
  188. {
  189. channel->ob->code = mrq;
  190. channel->ob->flags = flags;
  191. if (data && size > 0)
  192. memcpy_toio(channel->ob->data, data, size);
  193. return tegra_bpmp_post_request(channel);
  194. }
/*
 * Claim a free threaded channel and write a request to it.
 *
 * Flow control is two-level: the counting semaphore bounds the number of
 * in-flight threaded messages (down_timeout() bounds the wait), while the
 * "allocated" bitmap tracks which channel slots are claimed, manipulated
 * under bpmp->lock. The "busy" bit is set last, so that
 * tegra_bpmp_handle_rx() only inspects channels whose request was
 * successfully posted.
 *
 * Returns the claimed channel, or an ERR_PTR() on failure, in which case
 * both the slot and the semaphore have been released again.
 */
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	/* sanity check: an unallocated slot's request area must be free */
	if (!tegra_bpmp_is_request_channel_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}
  234. static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
  235. unsigned int mrq, unsigned long flags,
  236. const void *data, size_t size)
  237. {
  238. int err;
  239. err = tegra_bpmp_wait_request_channel_free(channel);
  240. if (err < 0)
  241. return err;
  242. return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
  243. }
  244. int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
  245. struct tegra_bpmp_message *msg)
  246. {
  247. struct tegra_bpmp_channel *channel;
  248. int err;
  249. if (WARN_ON(!irqs_disabled()))
  250. return -EPERM;
  251. if (!tegra_bpmp_message_valid(msg))
  252. return -EINVAL;
  253. channel = bpmp->tx_channel;
  254. spin_lock(&bpmp->atomic_tx_lock);
  255. err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
  256. msg->tx.data, msg->tx.size);
  257. if (err < 0) {
  258. spin_unlock(&bpmp->atomic_tx_lock);
  259. return err;
  260. }
  261. spin_unlock(&bpmp->atomic_tx_lock);
  262. err = tegra_bpmp_ring_doorbell(bpmp);
  263. if (err < 0)
  264. return err;
  265. err = tegra_bpmp_wait_response(channel);
  266. if (err < 0)
  267. return err;
  268. return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
  269. &msg->rx.ret);
  270. }
  271. EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
/*
 * tegra_bpmp_transfer() - exchange a message with the BPMP via a threaded
 * channel, sleeping until the response arrives
 * @bpmp: BPMP instance
 * @msg: message to send; msg->rx.ret receives the firmware result code
 *
 * Must be called from sleepable context (enforced by the irqs_disabled()
 * WARN). The completion is signalled from tegra_bpmp_handle_rx().
 *
 * NOTE(review): if ringing the doorbell fails or the completion times
 * out, this returns without releasing the channel slot or the threaded
 * semaphore taken in tegra_bpmp_write_threaded(). Releasing them here
 * would race with a late response from the firmware, so the leak looks
 * deliberate — confirm against the channel ownership rules.
 */
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
  297. static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
  298. unsigned int mrq)
  299. {
  300. struct tegra_bpmp_mrq *entry;
  301. list_for_each_entry(entry, &bpmp->mrqs, list)
  302. if (entry->mrq == mrq)
  303. return entry;
  304. return NULL;
  305. }
/*
 * tegra_bpmp_mrq_return() - complete an incoming request from the BPMP
 * @channel: channel the request arrived on
 * @code: result code to report back to the firmware
 * @data: optional response payload (at most MSG_DATA_MIN_SZ bytes)
 * @size: size of @data in bytes
 *
 * Acknowledges the request and, if the sender set MSG_ACK in the inbound
 * flags, posts a response; the doorbell is only rung when the sender also
 * set MSG_RING. Unexpected backend failures are reported via WARN_ON and
 * abort the remaining steps.
 */
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	/* capture inbound flags before the request is acknowledged */
	unsigned long flags = channel->ib->flags;
	struct tegra_bpmp *bpmp = channel->bpmp;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_bpmp_ack_request(channel);
	if (WARN_ON(err < 0))
		return;

	/* sender did not ask for a response */
	if ((flags & MSG_ACK) == 0)
		return;

	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
		return;

	channel->ob->code = code;

	if (data && size > 0)
		memcpy_toio(channel->ob->data, data, size);

	err = tegra_bpmp_post_response(channel);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = tegra_bpmp_ring_doorbell(bpmp);
		if (WARN_ON(err < 0))
			return;
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
  334. static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
  335. unsigned int mrq,
  336. struct tegra_bpmp_channel *channel)
  337. {
  338. struct tegra_bpmp_mrq *entry;
  339. u32 zero = 0;
  340. spin_lock(&bpmp->lock);
  341. entry = tegra_bpmp_find_mrq(bpmp, mrq);
  342. if (!entry) {
  343. spin_unlock(&bpmp->lock);
  344. tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
  345. return;
  346. }
  347. entry->handler(mrq, channel, entry->data);
  348. spin_unlock(&bpmp->lock);
  349. }
  350. int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
  351. tegra_bpmp_mrq_handler_t handler, void *data)
  352. {
  353. struct tegra_bpmp_mrq *entry;
  354. unsigned long flags;
  355. if (!handler)
  356. return -EINVAL;
  357. entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
  358. if (!entry)
  359. return -ENOMEM;
  360. spin_lock_irqsave(&bpmp->lock, flags);
  361. entry->mrq = mrq;
  362. entry->handler = handler;
  363. entry->data = data;
  364. list_add(&entry->list, &bpmp->mrqs);
  365. spin_unlock_irqrestore(&bpmp->lock, flags);
  366. return 0;
  367. }
  368. EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
  369. void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
  370. {
  371. struct tegra_bpmp_mrq *entry;
  372. unsigned long flags;
  373. spin_lock_irqsave(&bpmp->lock, flags);
  374. entry = tegra_bpmp_find_mrq(bpmp, mrq);
  375. if (!entry)
  376. goto unlock;
  377. list_del(&entry->list);
  378. devm_kfree(bpmp->dev, entry);
  379. unlock:
  380. spin_unlock_irqrestore(&bpmp->lock, flags);
  381. }
  382. EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
  383. bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
  384. {
  385. struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
  386. struct mrq_query_abi_response resp;
  387. struct tegra_bpmp_message msg = {
  388. .mrq = MRQ_QUERY_ABI,
  389. .tx = {
  390. .data = &req,
  391. .size = sizeof(req),
  392. },
  393. .rx = {
  394. .data = &resp,
  395. .size = sizeof(resp),
  396. },
  397. };
  398. int err;
  399. err = tegra_bpmp_transfer(bpmp, &msg);
  400. if (err || msg.rx.ret)
  401. return false;
  402. return resp.status == 0;
  403. }
  404. EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
/*
 * Handler for MRQ_PING requests originating from the BPMP: replies with
 * the received challenge shifted left by one bit.
 *
 * NOTE(review): the request is read through a plain pointer cast into the
 * inbound IPC frame rather than memcpy_fromio(); presumably the area is
 * mapped such that direct access is safe — verify for all backends.
 */
static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request *request;
	struct mrq_ping_response response;

	request = (struct mrq_ping_request *)channel->ib->data;

	memset(&response, 0, sizeof(response));
	response.reply = request->challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}
  416. static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
  417. {
  418. struct mrq_ping_response response;
  419. struct mrq_ping_request request;
  420. struct tegra_bpmp_message msg;
  421. unsigned long flags;
  422. ktime_t start, end;
  423. int err;
  424. memset(&request, 0, sizeof(request));
  425. request.challenge = 1;
  426. memset(&response, 0, sizeof(response));
  427. memset(&msg, 0, sizeof(msg));
  428. msg.mrq = MRQ_PING;
  429. msg.tx.data = &request;
  430. msg.tx.size = sizeof(request);
  431. msg.rx.data = &response;
  432. msg.rx.size = sizeof(response);
  433. local_irq_save(flags);
  434. start = ktime_get();
  435. err = tegra_bpmp_transfer_atomic(bpmp, &msg);
  436. end = ktime_get();
  437. local_irq_restore(flags);
  438. if (!err)
  439. dev_dbg(bpmp->dev,
  440. "ping ok: challenge: %u, response: %u, time: %lld\n",
  441. request.challenge, response.reply,
  442. ktime_to_us(ktime_sub(end, start)));
  443. return err;
  444. }
/* deprecated version of tag query */
/*
 * Query the firmware tag via MRQ_QUERY_TAG: the firmware writes TAG_SZ
 * bytes into a DMA-coherent buffer whose bus address is passed in the
 * request. @size must be exactly TAG_SZ. The exchange uses the atomic
 * transfer path, which requires local interrupts to be disabled.
 */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	/* NOTE(review): GFP_DMA32 presumably keeps the buffer addressable
	 * by the firmware — confirm against the BPMP addressing limits */
	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}
  475. static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
  476. size_t size)
  477. {
  478. if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
  479. struct mrq_query_fw_tag_response resp;
  480. struct tegra_bpmp_message msg = {
  481. .mrq = MRQ_QUERY_FW_TAG,
  482. .rx = {
  483. .data = &resp,
  484. .size = sizeof(resp),
  485. },
  486. };
  487. int err;
  488. if (size != sizeof(resp.tag))
  489. return -EINVAL;
  490. err = tegra_bpmp_transfer(bpmp, &msg);
  491. if (err)
  492. return err;
  493. if (msg.rx.ret < 0)
  494. return -EINVAL;
  495. memcpy(tag, resp.tag, sizeof(resp.tag));
  496. return 0;
  497. }
  498. return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
  499. }
  500. static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
  501. {
  502. unsigned long flags = channel->ob->flags;
  503. if ((flags & MSG_RING) == 0)
  504. return;
  505. complete(&channel->completion);
  506. }
  507. void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
  508. {
  509. struct tegra_bpmp_channel *channel;
  510. unsigned int i, count;
  511. unsigned long *busy;
  512. channel = bpmp->rx_channel;
  513. count = bpmp->soc->channels.thread.count;
  514. busy = bpmp->threaded.busy;
  515. if (tegra_bpmp_is_request_ready(channel))
  516. tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
  517. spin_lock(&bpmp->lock);
  518. for_each_set_bit(i, busy, count) {
  519. struct tegra_bpmp_channel *channel;
  520. channel = &bpmp->threaded_channels[i];
  521. if (tegra_bpmp_is_response_ready(channel)) {
  522. tegra_bpmp_channel_signal(channel);
  523. clear_bit(i, busy);
  524. }
  525. }
  526. spin_unlock(&bpmp->lock);
  527. }
/*
 * Probe: allocate the BPMP context and channel bookkeeping, bring up the
 * SoC-specific transport, verify the firmware link (ping + tag query),
 * then populate child devices and the optional clock/reset/powergate/
 * debugfs sub-drivers.
 */
static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	/* the semaphore limits in-flight threaded messages to the number
	 * of threaded channels */
	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	/* one bitmap word array each for "allocated" and "busy" */
	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated)
		return -ENOMEM;

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy)
		return -ENOMEM;

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel)
		return -ENOMEM;

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel)
		return -ENOMEM;

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels)
		return -ENOMEM;

	/* SoC-specific transport setup */
	err = bpmp->soc->ops->init(bpmp);
	if (err < 0)
		return err;

	/* register a handler so the BPMP can ping us as well */
	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto deinit;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	/* %.*s: the tag buffer is not guaranteed to be NUL-terminated */
	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	platform_set_drvdata(pdev, bpmp);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
		err = tegra_bpmp_init_clocks(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
		err = tegra_bpmp_init_resets(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
		err = tegra_bpmp_init_powergates(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	/* debugfs failure is logged but deliberately not fatal */
	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
	if (bpmp->soc->ops->deinit)
		bpmp->soc->ops->deinit(bpmp);

	return err;
}
  612. static int __maybe_unused tegra_bpmp_resume(struct device *dev)
  613. {
  614. struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
  615. if (bpmp->soc->ops->resume)
  616. return bpmp->soc->ops->resume(bpmp);
  617. else
  618. return 0;
  619. }
/* Run the SoC resume hook during the noirq phase of system resume. */
static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.resume_noirq = tegra_bpmp_resume,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
/*
 * Channel layout for Tegra186-class devices (also built for Tegra194 and
 * Tegra234, which bind via the same compatible). Timeouts are given in
 * microseconds. NOTE(review): offset semantics are defined by the
 * tegra186 backend — confirm against bpmp-tegra186.c.
 */
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.ops = &tegra186_bpmp_ops,
	.num_resets = 193,
};
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
/*
 * Channel layout for Tegra210. Timeouts are given in microseconds.
 * NOTE(review): offset/count semantics are defined by the tegra210
 * backend — confirm against bpmp-tegra210.c.
 */
static const struct tegra_bpmp_soc tegra210_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 1,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 4,
			.count = 1,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 8,
			.count = 1,
			.timeout = 0,
		},
	},
	.ops = &tegra210_bpmp_ops,
};
#endif
/* OF match table; entries exist only for the SoC generations enabled
 * in the kernel configuration. */
static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
#endif
	{ }
};
/*
 * Built-in driver (builtin_platform_driver, no module unload) with no
 * remove callback; sysfs unbind is disabled via suppress_bind_attrs.
 */
static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_bpmp_probe,
};
builtin_platform_driver(tegra_bpmp_driver);