// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/limits.h>
#include <linux/sort.h>

#include "protocols.h"
#include "notify.h"
/*
 * Clock protocol message IDs as numbered by the SCMI specification.
 * IDs 0x0-0x2 are the generic PROTOCOL_* commands and are not listed here.
 */
enum scmi_clock_protocol_cmd {
        CLOCK_ATTRIBUTES = 0x3,
        CLOCK_DESCRIBE_RATES = 0x4,
        CLOCK_RATE_SET = 0x5,
        CLOCK_RATE_GET = 0x6,
        CLOCK_CONFIG_SET = 0x7,
        CLOCK_NAME_GET = 0x8,
        CLOCK_RATE_NOTIFY = 0x9,
        CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
};
/* Response payload of PROTOCOL_ATTRIBUTES (wire format, little-endian). */
struct scmi_msg_resp_clock_protocol_attributes {
        __le16 num_clocks;      /* number of clock domains exposed */
        u8 max_async_req;       /* max concurrent async rate-set requests */
        u8 reserved;
};
/* Response payload of CLOCK_ATTRIBUTES (wire format, little-endian). */
struct scmi_msg_resp_clock_attributes {
        __le32 attributes;
/* Config bit used with CLOCK_CONFIG_SET, not a flag of @attributes */
#define CLOCK_ENABLE    BIT(0)
/* Feature flags carried in the top bits of @attributes */
#define SUPPORTS_RATE_CHANGED_NOTIF(x)          ((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)              ((x) & BIT(29))
        u8 name[SCMI_SHORT_NAME_MAX_SIZE];
        /* Only meaningful on newer platforms; see version check at the caller */
        __le32 clock_enable_latency;
};
/* Request payload of CLOCK_CONFIG_SET (wire format, little-endian). */
struct scmi_clock_set_config {
        __le32 id;              /* clock domain identifier */
        __le32 attributes;      /* e.g. CLOCK_ENABLE to gate the clock on */
};
/* Request payload of CLOCK_DESCRIBE_RATES (wire format, little-endian). */
struct scmi_msg_clock_describe_rates {
        __le32 id;              /* clock domain identifier */
        __le32 rate_index;      /* number of rate entries already received */
};
/*
 * Response payload of CLOCK_DESCRIBE_RATES: a flags word followed by a
 * flexible array of 64-bit rates split into low/high 32-bit halves.
 */
struct scmi_msg_resp_clock_describe_rates {
        __le32 num_rates_flags;
#define NUM_RETURNED(x)         ((x) & 0xfff)
#define RATE_DISCRETE(x)        !((x) & BIT(12))
#define NUM_REMAINING(x)        ((x) >> 16)
        struct {
                __le32 value_low;       /* lower 32 bits of the rate */
                __le32 value_high;      /* upper 32 bits of the rate */
        } rate[];
/* Assemble one rate entry into a native u64; evaluates X only once. */
#define RATE_TO_U64(X)          \
({                              \
        typeof(X) x = (X);      \
        le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
/* Request payload of CLOCK_RATE_SET (wire format, little-endian). */
struct scmi_clock_set_rate {
        __le32 flags;
#define CLOCK_SET_ASYNC         BIT(0)  /* complete via delayed response */
#define CLOCK_SET_IGNORE_RESP   BIT(1)
#define CLOCK_SET_ROUND_UP      BIT(2)
#define CLOCK_SET_ROUND_AUTO    BIT(3)
        __le32 id;              /* clock domain identifier */
        __le32 value_low;       /* lower 32 bits of the requested rate */
        __le32 value_high;      /* upper 32 bits of the requested rate */
};
/* Delayed-response payload for an async CLOCK_RATE_SET. */
struct scmi_msg_resp_set_rate_complete {
        __le32 id;              /* clock domain the completion refers to */
        __le32 rate_low;        /* lower 32 bits of the rate actually set */
        __le32 rate_high;       /* upper 32 bits of the rate actually set */
};
/* Request payload of CLOCK_RATE_NOTIFY / ..._CHANGE_REQUESTED_NOTIFY. */
struct scmi_msg_clock_rate_notify {
        __le32 clk_id;          /* clock domain identifier */
        __le32 notify_enable;   /* BIT(0) set to enable notifications */
};
/* Notification payload delivered with clock rate events. */
struct scmi_clock_rate_notify_payld {
        __le32 agent_id;        /* agent that caused/requested the change */
        __le32 clock_id;        /* clock domain the event refers to */
        __le32 rate_low;        /* lower 32 bits of the new rate */
        __le32 rate_high;       /* upper 32 bits of the new rate */
};
/*
 * struct clock_info - per-protocol-instance private data
 * @version: detected clock protocol version
 * @num_clocks: number of clock domains reported by the platform
 * @max_async_req: platform limit on concurrent async rate-set requests
 * @cur_async_req: async rate-set requests currently in flight
 * @clk: array of @num_clocks cached per-domain descriptors
 */
struct clock_info {
        u32 version;
        int num_clocks;
        int max_async_req;
        atomic_t cur_async_req;
        struct scmi_clock_info *clk;
};
/* Map notification event ids (used as array index) to protocol commands. */
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
        CLOCK_RATE_NOTIFY,
        CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
  95. static int
  96. scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
  97. struct clock_info *ci)
  98. {
  99. int ret;
  100. struct scmi_xfer *t;
  101. struct scmi_msg_resp_clock_protocol_attributes *attr;
  102. ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
  103. 0, sizeof(*attr), &t);
  104. if (ret)
  105. return ret;
  106. attr = t->rx.buf;
  107. ret = ph->xops->do_xfer(ph, t);
  108. if (!ret) {
  109. ci->num_clocks = le16_to_cpu(attr->num_clocks);
  110. ci->max_async_req = attr->max_async_req;
  111. }
  112. ph->xops->xfer_put(ph, t);
  113. return ret;
  114. }
/*
 * scmi_clock_attributes_get - fetch the attributes of one clock domain
 * @ph: SCMI protocol handle
 * @clk_id: identifier of the clock domain to query
 * @clk: scmi_clock_info to fill in (name, latency, notification support)
 * @version: clock protocol version reported by the platform
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
                                     u32 clk_id, struct scmi_clock_info *clk,
                                     u32 version)
{
        int ret;
        u32 attributes;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_attributes *attr;

        ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
                                      sizeof(clk_id), sizeof(*attr), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);
        attr = t->rx.buf;

        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                u32 latency = 0;

                /* Copy out of the xfer buffer before it is released below */
                attributes = le32_to_cpu(attr->attributes);
                strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
                /* clock_enable_latency field is present only since SCMI v3.1 */
                if (PROTOCOL_REV_MAJOR(version) >= 0x2)
                        latency = le32_to_cpu(attr->clock_enable_latency);
                /* A latency of 0 is mapped to the pessimistic U32_MAX --
                 * presumably 0 means "not reported"; confirm against spec */
                clk->enable_latency = latency ? : U32_MAX;
        }

        ph->xops->xfer_put(ph, t);

        /*
         * If supported overwrite short name with the extended one;
         * on error just carry on and use already provided short name.
         */
        if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
                if (SUPPORTS_EXTENDED_NAMES(attributes))
                        ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
                                                    clk->name,
                                                    SCMI_MAX_STR_SIZE);

                if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
                        clk->rate_changed_notifications = true;
                if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
                        clk->rate_change_requested_notifications = true;
        }

        return ret;
}
  156. static int rate_cmp_func(const void *_r1, const void *_r2)
  157. {
  158. const u64 *r1 = _r1, *r2 = _r2;
  159. if (*r1 < *r2)
  160. return -1;
  161. else if (*r1 == *r2)
  162. return 0;
  163. else
  164. return 1;
  165. }
/* Private context threaded through the describe-rates iterator callbacks. */
struct scmi_clk_ipriv {
        struct device *dev;             /* used for diagnostics only */
        u32 clk_id;                     /* clock domain being enumerated */
        struct scmi_clock_info *clk;    /* destination descriptor */
};
  171. static void iter_clk_describe_prepare_message(void *message,
  172. const unsigned int desc_index,
  173. const void *priv)
  174. {
  175. struct scmi_msg_clock_describe_rates *msg = message;
  176. const struct scmi_clk_ipriv *p = priv;
  177. msg->id = cpu_to_le32(p->clk_id);
  178. /* Set the number of rates to be skipped/already read */
  179. msg->rate_index = cpu_to_le32(desc_index);
  180. }
/*
 * iter_clk_describe_update_state - parse one reply's header word
 *
 * Extracts returned/remaining counts and the discrete-vs-range flag.
 * Range clocks must answer with exactly one triplet; a known class of
 * out-of-spec firmware sends the triplet with a wrong count, which is
 * detected and repaired here when the payload size proves it is safe.
 *
 * Return: 0 on success, -EPROTO for an unrepairable out-of-spec reply.
 */
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
                               const void *response, void *priv)
{
        u32 flags;
        struct scmi_clk_ipriv *p = priv;
        const struct scmi_msg_resp_clock_describe_rates *r = response;

        flags = le32_to_cpu(r->num_rates_flags);
        st->num_remaining = NUM_REMAINING(flags);
        st->num_returned = NUM_RETURNED(flags);
        p->clk->rate_discrete = RATE_DISCRETE(flags);

        /* Warn about out of spec replies ... */
        if (!p->clk->rate_discrete &&
            (st->num_returned != 3 || st->num_remaining != 0)) {
                dev_warn(p->dev,
                         "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
                         p->clk->name, st->num_returned, st->num_remaining,
                         st->rx_len);

                /*
                 * A known quirk: a triplet is returned but num_returned != 3
                 * Check for a safe payload size and fix.
                 */
                if (st->num_returned != 3 && st->num_remaining == 0 &&
                    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
                        /* Payload really holds 3 rates: trust it */
                        st->num_returned = 3;
                        st->num_remaining = 0;
                } else {
                        dev_err(p->dev,
                                "Cannot fix out-of-spec reply !\n");
                        return -EPROTO;
                }
        }

        return 0;
}
/*
 * iter_clk_describe_process_response - consume one rate entry
 *
 * For range (non-discrete) clocks the reply carries a <min, max, step>
 * triplet, mapped here by overall position; for discrete clocks each
 * entry is appended to the clock's rate list.
 *
 * NOTE(review): the fixed r->rate[0..2] indices in the range branch
 * assume the whole triplet arrives in a single reply (desc_index == 0),
 * which matches the quirk handling in iter_clk_describe_update_state --
 * confirm if multi-message range replies must also be supported.
 *
 * Return: 0 on success, -EINVAL for a range entry beyond the triplet.
 */
static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
                                   const void *response,
                                   struct scmi_iterator_state *st, void *priv)
{
        int ret = 0;
        struct scmi_clk_ipriv *p = priv;
        const struct scmi_msg_resp_clock_describe_rates *r = response;

        if (!p->clk->rate_discrete) {
                /* Overall position across all replies received so far */
                switch (st->desc_index + st->loop_idx) {
                case 0:
                        p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
                        break;
                case 1:
                        p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
                        break;
                case 2:
                        p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
        } else {
                u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

                *rate = RATE_TO_U64(r->rate[st->loop_idx]);
                p->clk->list.num_rates++;
        }

        return ret;
}
  245. static int
  246. scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
  247. struct scmi_clock_info *clk)
  248. {
  249. int ret;
  250. void *iter;
  251. struct scmi_iterator_ops ops = {
  252. .prepare_message = iter_clk_describe_prepare_message,
  253. .update_state = iter_clk_describe_update_state,
  254. .process_response = iter_clk_describe_process_response,
  255. };
  256. struct scmi_clk_ipriv cpriv = {
  257. .clk_id = clk_id,
  258. .clk = clk,
  259. .dev = ph->dev,
  260. };
  261. iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
  262. CLOCK_DESCRIBE_RATES,
  263. sizeof(struct scmi_msg_clock_describe_rates),
  264. &cpriv);
  265. if (IS_ERR(iter))
  266. return PTR_ERR(iter);
  267. ret = ph->hops->iter_response_run(iter);
  268. if (ret)
  269. return ret;
  270. if (!clk->rate_discrete) {
  271. dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
  272. clk->range.min_rate, clk->range.max_rate,
  273. clk->range.step_size);
  274. } else if (clk->list.num_rates) {
  275. sort(clk->list.rates, clk->list.num_rates,
  276. sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
  277. }
  278. return ret;
  279. }
  280. static int
  281. scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
  282. u32 clk_id, u64 *value)
  283. {
  284. int ret;
  285. struct scmi_xfer *t;
  286. ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
  287. sizeof(__le32), sizeof(u64), &t);
  288. if (ret)
  289. return ret;
  290. put_unaligned_le32(clk_id, t->tx.buf);
  291. ret = ph->xops->do_xfer(ph, t);
  292. if (!ret)
  293. *value = get_unaligned_le64(t->rx.buf);
  294. ph->xops->xfer_put(ph, t);
  295. return ret;
  296. }
/*
 * scmi_clock_rate_set - program a new rate for a clock domain
 * @ph: SCMI protocol handle
 * @clk_id: clock domain identifier
 * @rate: requested rate
 *
 * When the platform advertises support for asynchronous requests and the
 * number of in-flight async requests is still below that limit, the
 * command is sent with CLOCK_SET_ASYNC and its delayed response is
 * awaited; otherwise a plain synchronous transfer is used.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
                               u32 clk_id, u64 rate)
{
        int ret;
        u32 flags = 0;
        struct scmi_xfer *t;
        struct scmi_clock_set_rate *cfg;
        struct clock_info *ci = ph->get_priv(ph);

        ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        /*
         * Count this request against the async budget; the increment here
         * is paired with the unconditional decrement below, so the counter
         * stays balanced even when the synchronous fallback is taken.
         */
        if (ci->max_async_req &&
            atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
                flags |= CLOCK_SET_ASYNC;

        cfg = t->tx.buf;
        cfg->flags = cpu_to_le32(flags);
        cfg->id = cpu_to_le32(clk_id);
        cfg->value_low = cpu_to_le32(rate & 0xffffffff);
        cfg->value_high = cpu_to_le32(rate >> 32);

        if (flags & CLOCK_SET_ASYNC) {
                ret = ph->xops->do_xfer_with_response(ph, t);
                if (!ret) {
                        struct scmi_msg_resp_set_rate_complete *resp;

                        resp = t->rx.buf;
                        /* The delayed response must refer to our clock */
                        if (le32_to_cpu(resp->id) == clk_id)
                                dev_dbg(ph->dev,
                                        "Clk ID %d set async to %llu\n", clk_id,
                                        get_unaligned_le64(&resp->rate_low));
                        else
                                ret = -EPROTO;
                }
        } else {
                ret = ph->xops->do_xfer(ph, t);
        }

        if (ci->max_async_req)
                atomic_dec(&ci->cur_async_req);

        ph->xops->xfer_put(ph, t);
        return ret;
}
  336. static int
  337. scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
  338. u32 config, bool atomic)
  339. {
  340. int ret;
  341. struct scmi_xfer *t;
  342. struct scmi_clock_set_config *cfg;
  343. ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
  344. sizeof(*cfg), 0, &t);
  345. if (ret)
  346. return ret;
  347. t->hdr.poll_completion = atomic;
  348. cfg = t->tx.buf;
  349. cfg->id = cpu_to_le32(clk_id);
  350. cfg->attributes = cpu_to_le32(config);
  351. ret = ph->xops->do_xfer(ph, t);
  352. ph->xops->xfer_put(ph, t);
  353. return ret;
  354. }
/* Gate the clock on, sleeping while waiting for completion. */
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
        return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}
/* Gate the clock off, sleeping while waiting for completion. */
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
        return scmi_clock_config_set(ph, clk_id, 0, false);
}
/* Gate the clock on using polled completion (atomic-context safe). */
static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
                                    u32 clk_id)
{
        return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
}
/* Gate the clock off using polled completion (atomic-context safe). */
static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
                                     u32 clk_id)
{
        return scmi_clock_config_set(ph, clk_id, 0, true);
}
/* Number of clock domains discovered at protocol init time. */
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
        struct clock_info *ci = ph->get_priv(ph);

        return ci->num_clocks;
}
  378. static const struct scmi_clock_info *
  379. scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
  380. {
  381. struct scmi_clock_info *clk;
  382. struct clock_info *ci = ph->get_priv(ph);
  383. if (clk_id >= ci->num_clocks)
  384. return NULL;
  385. clk = ci->clk + clk_id;
  386. if (!clk->name[0])
  387. return NULL;
  388. return clk;
  389. }
/* Operations exposed to SCMI clock users (e.g. the clk driver). */
static const struct scmi_clk_proto_ops clk_proto_ops = {
        .count_get = scmi_clock_count_get,
        .info_get = scmi_clock_info_get,
        .rate_get = scmi_clock_rate_get,
        .rate_set = scmi_clock_rate_set,
        .enable = scmi_clock_enable,
        .disable = scmi_clock_disable,
        .enable_atomic = scmi_clock_enable_atomic,
        .disable_atomic = scmi_clock_disable_atomic,
};
  400. static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
  401. u32 clk_id, int message_id, bool enable)
  402. {
  403. int ret;
  404. struct scmi_xfer *t;
  405. struct scmi_msg_clock_rate_notify *notify;
  406. ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
  407. if (ret)
  408. return ret;
  409. notify = t->tx.buf;
  410. notify->clk_id = cpu_to_le32(clk_id);
  411. notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
  412. ret = ph->xops->do_xfer(ph, t);
  413. ph->xops->xfer_put(ph, t);
  414. return ret;
  415. }
  416. static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
  417. u8 evt_id, u32 src_id, bool enable)
  418. {
  419. int ret, cmd_id;
  420. if (evt_id >= ARRAY_SIZE(evt_2_cmd))
  421. return -EINVAL;
  422. cmd_id = evt_2_cmd[evt_id];
  423. ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
  424. if (ret)
  425. pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
  426. evt_id, src_id, ret);
  427. return ret;
  428. }
  429. static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
  430. u8 evt_id, ktime_t timestamp,
  431. const void *payld, size_t payld_sz,
  432. void *report, u32 *src_id)
  433. {
  434. const struct scmi_clock_rate_notify_payld *p = payld;
  435. struct scmi_clock_rate_notif_report *r = report;
  436. if (sizeof(*p) != payld_sz ||
  437. (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
  438. evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
  439. return NULL;
  440. r->timestamp = timestamp;
  441. r->agent_id = le32_to_cpu(p->agent_id);
  442. r->clock_id = le32_to_cpu(p->clock_id);
  443. r->rate = get_unaligned_le64(&p->rate_low);
  444. *src_id = r->clock_id;
  445. return r;
  446. }
  447. static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
  448. {
  449. struct clock_info *ci = ph->get_priv(ph);
  450. if (!ci)
  451. return -EINVAL;
  452. return ci->num_clocks;
  453. }
/* Events exported by this protocol to the SCMI notification core. */
static const struct scmi_event clk_events[] = {
        {
                .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
                .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
                .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
        },
        {
                .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
                .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
                .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
        },
};
/* Callbacks the SCMI notification core uses to drive clock events. */
static const struct scmi_event_ops clk_event_ops = {
        .get_num_sources = scmi_clk_get_num_sources,
        .set_notify_enabled = scmi_clk_set_notify_enabled,
        .fill_custom_report = scmi_clk_fill_custom_report,
};
/* Event descriptor bundle registered together with the protocol. */
static const struct scmi_protocol_events clk_protocol_events = {
        .queue_sz = SCMI_PROTO_QUEUE_SZ,
        .ops = &clk_event_ops,
        .evts = clk_events,
        .num_events = ARRAY_SIZE(clk_events),
};
/*
 * scmi_clock_protocol_init - discover and cache all clock domains
 * @ph: SCMI protocol handle for this instance
 *
 * Reads the protocol version and attributes, allocates the per-instance
 * clock_info plus the per-domain descriptor array (devm-managed), then
 * queries each domain's attributes and supported rates. A per-domain
 * failure is not fatal: such a domain keeps its zeroed name and is later
 * filtered out by scmi_clock_info_get().
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
        u32 version;
        int clkid, ret;
        struct clock_info *cinfo;

        ret = ph->xops->version_get(ph, &version);
        if (ret)
                return ret;

        dev_dbg(ph->dev, "Clock Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

        cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
        if (!cinfo)
                return -ENOMEM;

        ret = scmi_clock_protocol_attributes_get(ph, cinfo);
        if (ret)
                return ret;

        cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
                                  sizeof(*cinfo->clk), GFP_KERNEL);
        if (!cinfo->clk)
                return -ENOMEM;

        for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
                struct scmi_clock_info *clk = cinfo->clk + clkid;

                /* Rates are only described for domains we could query */
                ret = scmi_clock_attributes_get(ph, clkid, clk, version);
                if (!ret)
                        scmi_clock_describe_rates_get(ph, clkid, clk);
        }

        cinfo->version = version;
        return ph->set_priv(ph, cinfo);
}
/* Protocol descriptor registered with the SCMI core at module load. */
static const struct scmi_protocol scmi_clock = {
        .id = SCMI_PROTOCOL_CLOCK,
        .owner = THIS_MODULE,
        .instance_init = &scmi_clock_protocol_init,
        .ops = &clk_proto_ops,
        .events = &clk_protocol_events,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)