// SPDX-License-Identifier: GPL-2.0+
/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <[email protected]>
 *
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define CPTS_SKB_RX_TX_TMO 100 /* ms */
#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */

struct cpts_skb_cb_data {
	u32 skb_mtype_seqid;
	unsigned long tmo;
};

#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)

static int cpts_event_port(struct cpts_event *event)
{
	return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
}

static int event_expired(struct cpts_event *event)
{
	return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}
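
/* Pop one event from the hardware FIFO. Returns 0 and fills in
 * @high/@low when an event is pending, or -1 when the FIFO is empty.
 */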
static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);
		*low  = cpts_read32(cpts, event_low);
		cpts_write32(cpts, EVENT_POP, event_pop);
		return 0;
	}
	return -1;
}
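
/* Return expired events from the active list to the free pool.
 * Returns 0 if at least one event was reclaimed, -1 otherwise.
 */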
static int cpts_purge_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}

/*
 * Drain up to CPTS_FIFO_DEPTH events from the hardware FIFO and dispatch
 * them by type. Returns zero if an event matching @match was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
	struct ptp_clock_event pevent;
	bool need_schedule = false;
	struct cpts_event *event;
	unsigned long flags;
	int i, type = -1;
	u32 hi, lo;

	spin_lock_irqsave(&cpts->lock, flags);

	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
		if (cpts_fifo_pop(cpts, &hi, &lo))
			break;

		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
			dev_warn(cpts->dev, "cpts: event pool empty\n");
			break;
		}

		event = list_first_entry(&cpts->pool, struct cpts_event, list);
		event->high = hi;
		event->low = lo;
		event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
		type = event_type(event);

		dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
			type, event->high, event->low);

		switch (type) {
		case CPTS_EV_PUSH:
			WRITE_ONCE(cpts->cur_timestamp, lo);
			timecounter_read(&cpts->tc);
			if (cpts->mult_new) {
				cpts->cc.mult = cpts->mult_new;
				cpts->mult_new = 0;
			}
			if (!cpts->irq_poll)
				complete(&cpts->ts_push_complete);
			break;
		case CPTS_EV_TX:
		case CPTS_EV_RX:
			event->tmo = jiffies +
				msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);

			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);
			need_schedule = true;
			break;
		case CPTS_EV_ROLL:
		case CPTS_EV_HALF:
			break;
		case CPTS_EV_HW:
			pevent.timestamp = event->timestamp;
			pevent.type = PTP_CLOCK_EXTTS;
			pevent.index = cpts_event_port(event) - 1;
			ptp_clock_event(cpts->clock, &pevent);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			break;
		}
		if (type == match)
			break;
	}

	spin_unlock_irqrestore(&cpts->lock, flags);

	if (!cpts->irq_poll && need_schedule)
		ptp_schedule_worker(cpts->clock, 0);

	return type == match ? 0 : -1;
}

void cpts_misc_interrupt(struct cpts *cpts)
{
	cpts_fifo_read(cpts, -1);
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
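
/* Cyclecounter read callback: return the raw counter value captured by
 * the most recent TS_PUSH event rather than touching the hardware here.
 */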
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	return READ_ONCE(cpts->cur_timestamp);
}
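
/* Trigger a TS_PUSH event so the hardware latches the current counter
 * value, then either poll the FIFO (irq_poll mode) or wait for the
 * interrupt handler to complete ts_push_complete.
 */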
static void cpts_update_cur_time(struct cpts *cpts, int match,
				 struct ptp_system_timestamp *sts)
{
	unsigned long flags;

	reinit_completion(&cpts->ts_push_complete);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	cpts_write32(cpts, TS_PUSH, ts_push);
	cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
		dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");

	if (!cpts->irq_poll &&
	    !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
		dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}

/* PTP clock operations */
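
/* Scale the nominal mult by the requested ppb offset:
 * mult_new = cc_mult +/- cc_mult * |ppb| / 1e9. The new value takes
 * effect at the next CPTS_EV_PUSH event, applied in cpts_fifo_read().
 */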
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	int neg_adj = 0;
	u32 diff, mult;
	u64 adj;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	mult = cpts->cc_mult;
	adj = mult;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts->mult_new = neg_adj ? mult - diff : mult + diff;

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);

	mutex_unlock(&cpts->ptp_clk_mutex);
	return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_adjtime(&cpts->tc, delta);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);

	ns = timecounter_read(&cpts->tc);
	mutex_unlock(&cpts->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	ns = timespec64_to_ns(ts);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_init(&cpts->tc, &cpts->cc, ns);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}
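
/* Enable or disable hardware timestamp capture on external input @index
 * by flipping the per-input enable bit (bit 8 + index) in the control
 * register, mirroring the state in hw_ts_enable.
 */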
static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
{
	u32 v;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_mutex);

	v = cpts_read32(cpts, control);
	if (on) {
		v |= BIT(8 + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(8 + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	cpts_write32(cpts, v, control);

	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return cpts_extts_enable(cpts, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
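
/* Try to match a TX event against the queued skbs. The txq is spliced
 * onto a private list so it can be walked without holding the queue
 * lock; matched and expired skbs are dropped, the rest are requeued.
 */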
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->high &
		      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct cpts_skb_cb_data *skb_cb =
					(struct cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(event->timestamp);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* drop any skbs whose timestamp deadline has expired */
			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}
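
/* Walk the pending event list, delivering TX timestamps that now have a
 * matching skb and recycling matched or expired events into the pool.
 */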
static void cpts_process_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct cpts_event, list);
		if (cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}
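
/* PTP aux worker: refresh the timecounter so the 32-bit hardware counter
 * never wraps unobserved, process pending events, and reschedule sooner
 * (CPTS_SKB_TX_WORK_TIMEOUT) while TX skbs are still waiting in the txq.
 */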
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	unsigned long delay = cpts->ov_check_period;
	unsigned long flags;
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, -1, NULL);
	ns = timecounter_read(&cpts->tc);

	cpts_process_events(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq)) {
		cpts_purge_txq(cpts);
		if (!skb_queue_empty(&cpts->txq))
			delay = CPTS_SKB_TX_WORK_TIMEOUT;
	}
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
	mutex_unlock(&cpts->ptp_clk_mutex);
	return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.max_adj	= 1000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= cpts_ptp_adjfreq,
	.adjtime	= cpts_ptp_adjtime,
	.gettimex64	= cpts_ptp_gettimeex,
	.settime64	= cpts_ptp_settime,
	.enable		= cpts_ptp_enable,
	.do_aux_work	= cpts_overflow_check,
};
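
/* Classify the skb as a PTP frame and pack its message type and
 * sequence id into the key used to match skbs against FIFO events.
 * Returns 1 on success, 0 if the skb is not a timestampable PTP frame.
 */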
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid	= ntohs(hdr->sequence_id);

	*mtype_seqid  = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
	*mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;

	return 1;
}
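
/* Drain the FIFO, then search the pending event list for an event whose
 * type/message-type/sequence-id key matches @skb_mtype_seqid. Returns
 * the event timestamp in ns, or 0 if no match was found.
 */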
static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
			int ev_type, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	cpts_fifo_read(cpts, -1);
	spin_lock_irqsave(&cpts->lock, flags);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->high &
			      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
			       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
			       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}

void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

	/* cpts_rx_timestamp() is called before eth_type_trans(), so the
	 * skb MAC header properties are not yet configured. Hence the
	 * MAC header must be reset here.
	 */
	skb_reset_mac_header(skb);
	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
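
/* Take an extra reference on the outgoing skb, stamp it with an expiry
 * deadline, queue it on the txq and kick the PTP worker, which matches
 * it against TX FIFO events.
 */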
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	/* Always defer TX TS processing to PTP worker */
	skb_get(skb);
	/* get the timestamp for timeouts */
	skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
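
/* Bring the CPTS block up: initialize the event lists and txq, enable
 * the counter and its interrupt, seed the timecounter from system time
 * and register the PTP clock.
 */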
int cpts_register(struct cpts *cpts)
{
	int err, i;

	skb_queue_head_init(&cpts->txq);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	for (i = 0; i < CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	err = clk_enable(cpts->refclk);
	if (err)
		return err;

	cpts_write32(cpts, CPTS_EN, control);
	cpts_write32(cpts, TS_PEND_EN, int_enable);

	timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());

	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
	if (IS_ERR(cpts->clock)) {
		err = PTR_ERR(cpts->clock);
		cpts->clock = NULL;
		goto err_ptp;
	}
	cpts->phc_index = ptp_clock_index(cpts->clock);

	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
	return 0;

err_ptp:
	clk_disable(cpts->refclk);
	return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
	if (WARN_ON(!cpts->clock))
		return;

	ptp_clock_unregister(cpts->clock);
	cpts->clock = NULL;
	cpts->phc_index = -1;

	cpts_write32(cpts, 0, int_enable);
	cpts_write32(cpts, 0, control);

	/* Drop all queued packets */
	skb_queue_purge(&cpts->txq);

	clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);
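
/* Derive the cyclecounter mult/shift pair from the reference clock rate
 * (unless DT supplied fixed values). maxsec is the wrap time of the
 * counter, capped at 10s; the overflow check runs every maxsec / 2.
 */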
static void cpts_calc_mult_shift(struct cpts *cpts)
{
	u64 frac, maxsec, ns;
	u32 freq;

	freq = clk_get_rate(cpts->refclk);

	/* Calc the maximum number of seconds which we can run before
	 * wrapping around.
	 */
	maxsec = cpts->cc.mask;
	do_div(maxsec, freq);
	/* limit conversion rate to 10 sec as higher values will produce
	 * too small mult factors and so reduce the conversion accuracy
	 */
	if (maxsec > 10)
		maxsec = 10;

	/* Calc overflow check period (maxsec / 2) */
	cpts->ov_check_period = (HZ * maxsec) / 2;
	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
		 cpts->ov_check_period);

	if (cpts->cc.mult || cpts->cc.shift)
		return;

	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
			       freq, NSEC_PER_SEC, maxsec);

	frac = 0;
	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

	dev_info(cpts->dev,
		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
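
/* Optionally register the "cpts-refclk-mux" DT child node as a mux clock
 * so the CPTS reference clock source can be selected through the common
 * clock framework. Absence of the node is not an error.
 */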
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
	struct device_node *refclk_np;
	const char **parent_names;
	unsigned int num_parents;
	struct clk_hw *clk_hw;
	int ret = -EINVAL;
	u32 *mux_table;

	refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
	if (!refclk_np)
		/* refclk selection is not supported on all SoCs */
		return 0;

	num_parents = of_clk_get_parent_count(refclk_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %s must have parents\n",
			refclk_np->name);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, num_parents,
				    sizeof(*parent_names), GFP_KERNEL);

	mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
				 GFP_KERNEL);
	if (!mux_table || !parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(refclk_np, parent_names, num_parents);

	ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
						  mux_table,
						  num_parents, num_parents);
	if (ret < 0)
		goto mux_fail;

	clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
					   parent_names, num_parents,
					   0,
					   &cpts->reg->rftclk_sel, 0, 0x1F,
					   0, mux_table, NULL);
	if (IS_ERR(clk_hw)) {
		ret = PTR_ERR(clk_hw);
		goto mux_fail;
	}

	ret = devm_add_action_or_reset(cpts->dev,
				       (void(*)(void *))clk_hw_unregister_mux,
				       clk_hw);
	if (ret) {
		dev_err(cpts->dev, "add clkmux unreg action %d", ret);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
	if (ret)
		goto mux_fail;

	ret = devm_add_action_or_reset(cpts->dev,
				       (void(*)(void *))of_clk_del_provider,
				       refclk_np);
	if (ret) {
		dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
		goto mux_fail;
	}

	return ret;

mux_fail:
	of_node_put(refclk_np);
	return ret;
}

static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
	int ret = -EINVAL;
	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return cpts_of_mux_clk_setup(cpts, node);

of_error:
	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
	return ret;
}
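
/* Allocate and initialize a cpts instance: map the register block, parse
 * the DT properties, acquire and prepare the "cpts" reference clock and
 * set up the cyclecounter. The clock is enabled later in cpts_register().
 */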
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
			 struct device_node *node, u32 n_ext_ts)
{
	struct cpts *cpts;
	int ret;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct cpsw_cpts __iomem *)regs;
	cpts->irq_poll = true;
	spin_lock_init(&cpts->lock);
	mutex_init(&cpts->ptp_clk_mutex);
	init_completion(&cpts->ts_push_complete);

	ret = cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk))
		/* try get clk from dev node for compatibility */
		cpts->refclk = devm_clk_get(dev, "cpts");

	if (IS_ERR(cpts->refclk)) {
		dev_err(dev, "Failed to get cpts refclk %ld\n",
			PTR_ERR(cpts->refclk));
		return ERR_CAST(cpts->refclk);
	}

	ret = clk_prepare(cpts->refclk);
	if (ret)
		return ERR_PTR(ret);

	cpts->cc.read = cpts_systim_read;
	cpts->cc.mask = CLOCKSOURCE_MASK(32);
	cpts->info = cpts_info;
	cpts->phc_index = -1;

	if (n_ext_ts)
		cpts->info.n_ext_ts = n_ext_ts;

	cpts_calc_mult_shift(cpts);

	/* save cc.mult original value as it can be modified
	 * by cpts_ptp_adjfreq().
	 */
	cpts->cc_mult = cpts->cc.mult;

	return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
	if (!cpts)
		return;

	if (WARN_ON(!cpts->refclk))
		return;

	clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <[email protected]>");