am65-cpts.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* TI K3 AM65x Common Platform Time Sync
  3. *
  4. * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
  5. *
  6. */
  7. #include <linux/clk.h>
  8. #include <linux/clk-provider.h>
  9. #include <linux/err.h>
  10. #include <linux/if_vlan.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/module.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/net_tstamp.h>
  15. #include <linux/of.h>
  16. #include <linux/of_irq.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/ptp_classify.h>
  20. #include <linux/ptp_clock_kernel.h>
  21. #include "am65-cpts.h"
/* Register layout of one signal generator instance; the same block is
 * used for both GENF (periodic output) and ESTF generators, replicated
 * per instance inside struct am65_cpts_regs.
 */
struct am65_genf_regs {
	u32 comp_lo;	/* Comparison Low Value 0:31 */
	u32 comp_hi;	/* Comparison High Value 32:63 */
	u32 control;	/* control */
	u32 length;	/* Length */
	u32 ppm_low;	/* PPM Load Low Value 0:31 */
	u32 ppm_hi;	/* PPM Load High Value 32:63 */
	u32 ts_nudge;	/* Nudge value */
} __aligned(32) __packed;
  31. #define AM65_CPTS_GENF_MAX_NUM 9
  32. #define AM65_CPTS_ESTF_MAX_NUM 8
/* CPTS memory-mapped register layout, accessed through the
 * am65_cpts_read32()/am65_cpts_write32() helpers below.
 */
struct am65_cpts_regs {
	u32 idver;		/* Identification and version */
	u32 control;		/* Time sync control */
	u32 rftclk_sel;		/* Reference Clock Select Register */
	u32 ts_push;		/* Time stamp event push */
	u32 ts_load_val_lo;	/* Time Stamp Load Low Value 0:31 */
	u32 ts_load_en;		/* Time stamp load enable */
	u32 ts_comp_lo;		/* Time Stamp Comparison Low Value 0:31 */
	u32 ts_comp_length;	/* Time Stamp Comparison Length */
	u32 intstat_raw;	/* Time sync interrupt status raw */
	u32 intstat_masked;	/* Time sync interrupt status masked */
	u32 int_enable;		/* Time sync interrupt enable */
	u32 ts_comp_nudge;	/* Time Stamp Comparison Nudge Value */
	u32 event_pop;		/* Event interrupt pop */
	u32 event_0;		/* Event Time Stamp lo 0:31 */
	u32 event_1;		/* Event Type Fields */
	u32 event_2;		/* Event Type Fields domain */
	u32 event_3;		/* Event Time Stamp hi 32:63 */
	u32 ts_load_val_hi;	/* Time Stamp Load High Value 32:63 */
	u32 ts_comp_hi;		/* Time Stamp Comparison High Value 32:63 */
	u32 ts_add_val;		/* Time Stamp Add value */
	u32 ts_ppm_low;		/* Time Stamp PPM Load Low Value 0:31 */
	u32 ts_ppm_hi;		/* Time Stamp PPM Load High Value 32:63 */
	u32 ts_nudge;		/* Time Stamp Nudge value */
	u32 reserv[33];
	struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
	struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
};
  61. /* CONTROL_REG */
  62. #define AM65_CPTS_CONTROL_EN BIT(0)
  63. #define AM65_CPTS_CONTROL_INT_TEST BIT(1)
  64. #define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
  65. #define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
  66. #define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
  67. #define AM65_CPTS_CONTROL_64MODE BIT(5)
  68. #define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
  69. #define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
  70. #define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
  71. #define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
  72. #define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
  73. #define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
  74. #define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
  75. #define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
  76. #define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
  77. #define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
  78. #define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
  79. #define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)
  80. #define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
  81. #define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
  82. /* RFTCLK_SEL_REG */
  83. #define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
  84. /* TS_PUSH_REG */
  85. #define AM65_CPTS_TS_PUSH BIT(0)
  86. /* TS_LOAD_EN_REG */
  87. #define AM65_CPTS_TS_LOAD_EN BIT(0)
  88. /* INTSTAT_RAW_REG */
  89. #define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
  90. /* INTSTAT_MASKED_REG */
  91. #define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
  92. /* INT_ENABLE_REG */
  93. #define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
  94. /* TS_COMP_NUDGE_REG */
  95. #define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
  96. /* EVENT_POP_REG */
  97. #define AM65_CPTS_EVENT_POP BIT(0)
  98. /* EVENT_1_REG */
  99. #define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
  100. #define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
  101. #define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
  102. #define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
  103. #define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
  104. #define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
  105. #define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
  106. /* EVENT_2_REG */
  107. #define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
  108. #define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
/* Event types as reported in the EVENT_TYPE field of the EVENT_1 word. */
enum {
	AM65_CPTS_EV_PUSH,	/* Time Stamp Push Event */
	AM65_CPTS_EV_ROLL,	/* Time Stamp Rollover Event */
	AM65_CPTS_EV_HALF,	/* Time Stamp Half Rollover Event */
	AM65_CPTS_EV_HW,	/* Hardware Time Stamp Push Event */
	AM65_CPTS_EV_RX,	/* Ethernet Receive Event */
	AM65_CPTS_EV_TX,	/* Ethernet Transmit Event */
	AM65_CPTS_EV_TS_COMP,	/* Time Stamp Compare Event */
	AM65_CPTS_EV_HOST,	/* Host Transmit Event */
};
/* One decoded CPTS event FIFO entry; lives on either the free pool or
 * the pending-events list of the owning struct am65_cpts.
 */
struct am65_cpts_event {
	struct list_head list;	/* link on cpts->pool or cpts->events */
	unsigned long tmo;	/* jiffies deadline for matching this event */
	u32 event1;		/* EVENT_1 register snapshot */
	u32 event2;		/* EVENT_2 register snapshot */
	u64 timestamp;		/* event_0 | ((u64)event_3 << 32) */
};
  126. #define AM65_CPTS_FIFO_DEPTH (16)
  127. #define AM65_CPTS_MAX_EVENTS (32)
  128. #define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
  129. #define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
  130. #define AM65_CPTS_MIN_PPM 0x400
/* Per-instance driver state. */
struct am65_cpts {
	struct device *dev;
	struct am65_cpts_regs __iomem *reg;	/* mapped CPTS registers */
	struct ptp_clock_info ptp_info;		/* instance copy of am65_ptp_info */
	struct ptp_clock *ptp_clock;
	int phc_index;
	struct clk_hw *clk_mux_hw;	/* optional refclk input mux */
	struct device_node *clk_mux_np;	/* "refclk-mux" DT node, if present */
	struct clk *refclk;
	u32 refclk_freq;		/* refclk rate in Hz */
	struct list_head events;	/* RX/TX events awaiting an skb match */
	struct list_head pool;		/* free event buffers */
	struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
	spinlock_t lock; /* protects events lists*/
	u32 ext_ts_inputs;		/* number of HW_TS_PUSH inputs */
	u32 genf_num;			/* number of periodic-output generators */
	u32 ts_add_val;			/* refclk period in ns, minus one */
	int irq;
	struct mutex ptp_clk_lock; /* PHC access sync */
	u64 timestamp;			/* counter value from the last PUSH event */
	u32 genf_enable;		/* bitmask of enabled GENF outputs */
	u32 hw_ts_enable;		/* bitmask of enabled HW_TS_PUSH inputs */
	struct sk_buff_head txq;	/* skbs waiting for a TX timestamp */
};
/* Per-skb state stored in skb->cb while a TX timestamp is outstanding. */
struct am65_cpts_skb_cb_data {
	unsigned long tmo;	/* jiffies deadline before the skb is dropped */
	u32 skb_mtype_seqid;	/* EVENT_1-format matching key for this skb */
};
  159. #define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
  160. #define am65_cpts_read32(c, r) readl(&(c)->reg->r)
  161. static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
  162. {
  163. u32 val;
  164. val = upper_32_bits(start_tstamp);
  165. am65_cpts_write32(cpts, val, ts_load_val_hi);
  166. val = lower_32_bits(start_tstamp);
  167. am65_cpts_write32(cpts, val, ts_load_val_lo);
  168. am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
  169. }
/* Program TS_ADD_VAL with the refclk period in ns, minus one (e.g. a
 * 500 MHz refclk -> 2 ns period -> ADD_VAL = 1).  Only the low 3 bits
 * are kept, i.e. refclk periods up to 8 ns are representable.
 */
static void am65_cpts_set_add_val(struct am65_cpts *cpts)
{
	/* select coefficient according to the rate */
	cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;

	am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
}
/* Stop the CPTS: clear the control register (drops the EN bit and all
 * timestamp-push enables), then mask the TS_PEND interrupt.
 */
static void am65_cpts_disable(struct am65_cpts *cpts)
{
	am65_cpts_write32(cpts, 0, control);
	am65_cpts_write32(cpts, 0, int_enable);
}
  181. static int am65_cpts_event_get_port(struct am65_cpts_event *event)
  182. {
  183. return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
  184. AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
  185. }
  186. static int am65_cpts_event_get_type(struct am65_cpts_event *event)
  187. {
  188. return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
  189. AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
  190. }
  191. static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
  192. {
  193. struct list_head *this, *next;
  194. struct am65_cpts_event *event;
  195. int removed = 0;
  196. list_for_each_safe(this, next, &cpts->events) {
  197. event = list_entry(this, struct am65_cpts_event, list);
  198. if (time_after(jiffies, event->tmo)) {
  199. list_del_init(&event->list);
  200. list_add(&event->list, &cpts->pool);
  201. ++removed;
  202. }
  203. }
  204. if (removed)
  205. dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
  206. return removed ? 0 : -1;
  207. }
  208. static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
  209. struct am65_cpts_event *event)
  210. {
  211. u32 r = am65_cpts_read32(cpts, intstat_raw);
  212. if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
  213. event->timestamp = am65_cpts_read32(cpts, event_0);
  214. event->event1 = am65_cpts_read32(cpts, event_1);
  215. event->event2 = am65_cpts_read32(cpts, event_2);
  216. event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
  217. am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
  218. return false;
  219. }
  220. return true;
  221. }
  222. static int am65_cpts_fifo_read(struct am65_cpts *cpts)
  223. {
  224. struct ptp_clock_event pevent;
  225. struct am65_cpts_event *event;
  226. bool schedule = false;
  227. int i, type, ret = 0;
  228. unsigned long flags;
  229. spin_lock_irqsave(&cpts->lock, flags);
  230. for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
  231. event = list_first_entry_or_null(&cpts->pool,
  232. struct am65_cpts_event, list);
  233. if (!event) {
  234. if (am65_cpts_cpts_purge_events(cpts)) {
  235. dev_err(cpts->dev, "cpts: event pool empty\n");
  236. ret = -1;
  237. goto out;
  238. }
  239. continue;
  240. }
  241. if (am65_cpts_fifo_pop_event(cpts, event))
  242. break;
  243. type = am65_cpts_event_get_type(event);
  244. switch (type) {
  245. case AM65_CPTS_EV_PUSH:
  246. cpts->timestamp = event->timestamp;
  247. dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
  248. cpts->timestamp);
  249. break;
  250. case AM65_CPTS_EV_RX:
  251. case AM65_CPTS_EV_TX:
  252. event->tmo = jiffies +
  253. msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
  254. list_del_init(&event->list);
  255. list_add_tail(&event->list, &cpts->events);
  256. dev_dbg(cpts->dev,
  257. "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
  258. event->event1, event->event2,
  259. event->timestamp);
  260. schedule = true;
  261. break;
  262. case AM65_CPTS_EV_HW:
  263. pevent.index = am65_cpts_event_get_port(event) - 1;
  264. pevent.timestamp = event->timestamp;
  265. pevent.type = PTP_CLOCK_EXTTS;
  266. dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
  267. pevent.index, event->timestamp);
  268. ptp_clock_event(cpts->ptp_clock, &pevent);
  269. break;
  270. case AM65_CPTS_EV_HOST:
  271. break;
  272. case AM65_CPTS_EV_ROLL:
  273. case AM65_CPTS_EV_HALF:
  274. case AM65_CPTS_EV_TS_COMP:
  275. dev_dbg(cpts->dev,
  276. "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
  277. type,
  278. event->event1, event->event2,
  279. event->timestamp);
  280. break;
  281. default:
  282. dev_err(cpts->dev, "cpts: unknown event type\n");
  283. ret = -1;
  284. goto out;
  285. }
  286. }
  287. out:
  288. spin_unlock_irqrestore(&cpts->lock, flags);
  289. if (schedule)
  290. ptp_schedule_worker(cpts->ptp_clock, 0);
  291. return ret;
  292. }
/* Latch and return the current CPTS counter value in nanoseconds.
 *
 * Triggers a TS_PUSH event and drains the FIFO so the resulting PUSH
 * event refreshes cpts->timestamp.  All callers serialize through
 * ptp_clk_lock.  @sts, when non-NULL, captures the system time
 * bracketing the hardware push (for PTP_SYS_OFFSET_EXTENDED).
 */
static u64 am65_cpts_gettime(struct am65_cpts *cpts,
			     struct ptp_system_timestamp *sts)
{
	unsigned long flags;
	u64 val = 0;

	/* temporarily disable cpts interrupt to avoid intentional
	 * doubled read. Interrupt can be in-flight - it's Ok.
	 */
	am65_cpts_write32(cpts, 0, int_enable);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
	/* readback presumably flushes the posted write before postts is
	 * taken - NOTE(review): confirm against TRM
	 */
	am65_cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	/* drain the FIFO; the PUSH event updates cpts->timestamp */
	am65_cpts_fifo_read(cpts);

	am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);

	val = cpts->timestamp;

	return val;
}
  314. static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
  315. {
  316. struct am65_cpts *cpts = dev_id;
  317. if (am65_cpts_fifo_read(cpts))
  318. dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
  319. return IRQ_HANDLED;
  320. }
  321. /* PTP clock operations */
  322. static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
  323. {
  324. struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
  325. int neg_adj = 0;
  326. u64 adj_period;
  327. u32 val;
  328. if (ppb < 0) {
  329. neg_adj = 1;
  330. ppb = -ppb;
  331. }
  332. /* base freq = 1GHz = 1 000 000 000
  333. * ppb_norm = ppb * base_freq / clock_freq;
  334. * ppm_norm = ppb_norm / 1000
  335. * adj_period = 1 000 000 / ppm_norm
  336. * adj_period = 1 000 000 000 / ppb_norm
  337. * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
  338. * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
  339. * adj_period = clock_freq / ppb
  340. */
  341. adj_period = div_u64(cpts->refclk_freq, ppb);
  342. mutex_lock(&cpts->ptp_clk_lock);
  343. val = am65_cpts_read32(cpts, control);
  344. if (neg_adj)
  345. val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
  346. else
  347. val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
  348. am65_cpts_write32(cpts, val, control);
  349. val = upper_32_bits(adj_period) & 0x3FF;
  350. am65_cpts_write32(cpts, val, ts_ppm_hi);
  351. val = lower_32_bits(adj_period);
  352. am65_cpts_write32(cpts, val, ts_ppm_low);
  353. mutex_unlock(&cpts->ptp_clk_lock);
  354. return 0;
  355. }
  356. static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  357. {
  358. struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
  359. s64 ns;
  360. mutex_lock(&cpts->ptp_clk_lock);
  361. ns = am65_cpts_gettime(cpts, NULL);
  362. ns += delta;
  363. am65_cpts_settime(cpts, ns);
  364. mutex_unlock(&cpts->ptp_clk_lock);
  365. return 0;
  366. }
  367. static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
  368. struct timespec64 *ts,
  369. struct ptp_system_timestamp *sts)
  370. {
  371. struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
  372. u64 ns;
  373. mutex_lock(&cpts->ptp_clk_lock);
  374. ns = am65_cpts_gettime(cpts, sts);
  375. mutex_unlock(&cpts->ptp_clk_lock);
  376. *ts = ns_to_timespec64(ns);
  377. return 0;
  378. }
  379. u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
  380. {
  381. u64 ns;
  382. /* reuse ptp_clk_lock as it serialize ts push */
  383. mutex_lock(&cpts->ptp_clk_lock);
  384. ns = am65_cpts_gettime(cpts, NULL);
  385. mutex_unlock(&cpts->ptp_clk_lock);
  386. return ns;
  387. }
  388. EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
  389. static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
  390. const struct timespec64 *ts)
  391. {
  392. struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
  393. u64 ns;
  394. ns = timespec64_to_ns(ts);
  395. mutex_lock(&cpts->ptp_clk_lock);
  396. am65_cpts_settime(cpts, ns);
  397. mutex_unlock(&cpts->ptp_clk_lock);
  398. return 0;
  399. }
  400. static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
  401. {
  402. u32 v;
  403. v = am65_cpts_read32(cpts, control);
  404. if (on) {
  405. v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
  406. cpts->hw_ts_enable |= BIT(index);
  407. } else {
  408. v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
  409. cpts->hw_ts_enable &= ~BIT(index);
  410. }
  411. am65_cpts_write32(cpts, v, control);
  412. }
  413. static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
  414. {
  415. if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
  416. return 0;
  417. mutex_lock(&cpts->ptp_clk_lock);
  418. am65_cpts_extts_enable_hw(cpts, index, on);
  419. mutex_unlock(&cpts->ptp_clk_lock);
  420. dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
  421. __func__, index, on ? "enabled" : "disabled");
  422. return 0;
  423. }
/**
 * am65_cpts_estf_enable - start an EST function generator
 * @cpts: cpts handle
 * @idx: ESTF instance index
 * @cfg: start time (ns) and period (ns)
 *
 * Converts the nanosecond period into refclk cycles and programs the
 * comparison and length registers of ESTF @idx.
 *
 * NOTE(review): cfg->ns_period * refclk_freq can overflow u64 for very
 * large periods before the U32_MAX check below - confirm that callers
 * bound ns_period.
 *
 * Return: 0 on success, -EINVAL if the cycle count exceeds 32 bits.
 */
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
			  struct am65_cpts_estf_cfg *cfg)
{
	u64 cycles;
	u32 val;

	/* period in refclk cycles, rounded up */
	cycles = cfg->ns_period * cpts->refclk_freq;
	cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
	if (cycles > U32_MAX)
		return -EINVAL;

	/* according to TRM should be zeroed */
	am65_cpts_write32(cpts, 0, estf[idx].length);

	val = upper_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_hi);
	val = lower_32_bits(cfg->ns_start);
	am65_cpts_write32(cpts, val, estf[idx].comp_lo);
	val = lower_32_bits(cycles);
	am65_cpts_write32(cpts, val, estf[idx].length);

	dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);

	return 0;
}
EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
  445. void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
  446. {
  447. am65_cpts_write32(cpts, 0, estf[idx].length);
  448. dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
  449. }
  450. EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
  451. static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
  452. struct ptp_perout_request *req, int on)
  453. {
  454. u64 ns_period, ns_start, cycles;
  455. struct timespec64 ts;
  456. u32 val;
  457. if (on) {
  458. ts.tv_sec = req->period.sec;
  459. ts.tv_nsec = req->period.nsec;
  460. ns_period = timespec64_to_ns(&ts);
  461. cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
  462. ts.tv_sec = req->start.sec;
  463. ts.tv_nsec = req->start.nsec;
  464. ns_start = timespec64_to_ns(&ts);
  465. val = upper_32_bits(ns_start);
  466. am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
  467. val = lower_32_bits(ns_start);
  468. am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
  469. val = lower_32_bits(cycles);
  470. am65_cpts_write32(cpts, val, genf[req->index].length);
  471. cpts->genf_enable |= BIT(req->index);
  472. } else {
  473. am65_cpts_write32(cpts, 0, genf[req->index].length);
  474. cpts->genf_enable &= ~BIT(req->index);
  475. }
  476. }
  477. static int am65_cpts_perout_enable(struct am65_cpts *cpts,
  478. struct ptp_perout_request *req, int on)
  479. {
  480. if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
  481. return 0;
  482. mutex_lock(&cpts->ptp_clk_lock);
  483. am65_cpts_perout_enable_hw(cpts, req, on);
  484. mutex_unlock(&cpts->ptp_clk_lock);
  485. dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
  486. __func__, req->index, on ? "enabled" : "disabled");
  487. return 0;
  488. }
  489. static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
  490. struct ptp_clock_request *rq, int on)
  491. {
  492. struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
  493. switch (rq->type) {
  494. case PTP_CLK_REQ_EXTTS:
  495. return am65_cpts_extts_enable(cpts, rq->extts.index, on);
  496. case PTP_CLK_REQ_PEROUT:
  497. return am65_cpts_perout_enable(cpts, &rq->perout, on);
  498. default:
  499. break;
  500. }
  501. return -EOPNOTSUPP;
  502. }
static long am65_cpts_ts_work(struct ptp_clock_info *ptp);

/* Template ptp_clock_info; copied into cpts->ptp_info at create time,
 * where max_adj, n_ext_ts and n_per_out are filled in per instance.
 * NOTE(review): "CTPS timer" looks like a typo for "CPTS", but the name
 * is user-visible via the PTP clock's sysfs name - do not change it.
 */
static struct ptp_clock_info am65_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "CTPS timer",
	.adjfreq	= am65_cpts_ptp_adjfreq,
	.adjtime	= am65_cpts_ptp_adjtime,
	.gettimex64	= am65_cpts_ptp_gettimex,
	.settime64	= am65_cpts_ptp_settime,
	.enable		= am65_cpts_ptp_enable,
	.do_aux_work	= am65_cpts_ts_work,
};
/* Try to match one RX/TX timestamp @event against the skbs queued on
 * cpts->txq and deliver the timestamp via skb_tstamp_tx().  Skbs whose
 * deadline has passed are dropped along the way.
 *
 * Return: true when a matching skb was found and consumed.
 */
static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
				  struct am65_cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	/* matching key: message type + event type + sequence id fields */
	mtype_seqid = event->event1 &
		      (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
		       AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
		       AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);

	/* splice the whole queue to a private list so the walk can be
	 * done without holding txq.lock
	 */
	__skb_queue_head_init(&txq_list);
	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	/* no need to grab txq.lock as access is always done under cpts->lock */
	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct am65_cpts_skb_cb_data *skb_cb =
					(struct am65_cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			u64 ns = event->timestamp;

			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev,
				"match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* timeout any expired skbs over 100 ms */
			dev_dbg(cpts->dev,
				"expiring tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	/* return any unmatched skbs to the shared queue */
	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}
/* Match every queued RX/TX timestamp event against pending skbs.
 * Events that matched (or expired) go back to the free pool; the rest
 * are re-queued on cpts->events for the next pass.
 */
static void am65_cpts_find_ts(struct am65_cpts *cpts)
{
	struct am65_cpts_event *event;
	struct list_head *this, *next;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	/* detach the event list so matching can run without cpts->lock */
	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct am65_cpts_event, list);
		if (am65_cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	/* re-attach leftovers and recycle consumed events */
	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}
  585. static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
  586. {
  587. struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
  588. unsigned long flags;
  589. long delay = -1;
  590. am65_cpts_find_ts(cpts);
  591. spin_lock_irqsave(&cpts->txq.lock, flags);
  592. if (!skb_queue_empty(&cpts->txq))
  593. delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
  594. spin_unlock_irqrestore(&cpts->txq.lock, flags);
  595. return delay;
  596. }
  597. /**
  598. * am65_cpts_rx_enable - enable rx timestamping
  599. * @cpts: cpts handle
  600. * @en: enable
  601. *
  602. * This functions enables rx packets timestamping. The CPTS can timestamp all
  603. * rx packets.
  604. */
  605. void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
  606. {
  607. u32 val;
  608. mutex_lock(&cpts->ptp_clk_lock);
  609. val = am65_cpts_read32(cpts, control);
  610. if (en)
  611. val |= AM65_CPTS_CONTROL_TSTAMP_EN;
  612. else
  613. val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
  614. am65_cpts_write32(cpts, val, control);
  615. mutex_unlock(&cpts->ptp_clk_lock);
  616. }
  617. EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
  618. static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
  619. {
  620. unsigned int ptp_class = ptp_classify_raw(skb);
  621. struct ptp_header *hdr;
  622. u8 msgtype;
  623. u16 seqid;
  624. if (ptp_class == PTP_CLASS_NONE)
  625. return 0;
  626. hdr = ptp_parse_header(skb, ptp_class);
  627. if (!hdr)
  628. return 0;
  629. msgtype = ptp_get_msgtype(hdr, ptp_class);
  630. seqid = ntohs(hdr->sequence_id);
  631. *mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
  632. AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
  633. *mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
  634. return 1;
  635. }
  636. /**
  637. * am65_cpts_tx_timestamp - save tx packet for timestamping
  638. * @cpts: cpts handle
  639. * @skb: packet
  640. *
  641. * This functions saves tx packet for timestamping if packet can be timestamped.
  642. * The future processing is done in from PTP auxiliary worker.
  643. */
  644. void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
  645. {
  646. struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
  647. if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
  648. return;
  649. /* add frame to queue for processing later.
  650. * The periodic FIFO check will handle this.
  651. */
  652. skb_get(skb);
  653. /* get the timestamp for timeouts */
  654. skb_cb->tmo = jiffies + msecs_to_jiffies(100);
  655. skb_queue_tail(&cpts->txq, skb);
  656. ptp_schedule_worker(cpts->ptp_clock, 0);
  657. }
  658. EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
  659. /**
  660. * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
  661. * @cpts: cpts handle
  662. * @skb: packet
  663. *
  664. * This functions should be called from .xmit().
  665. * It checks if packet can be timestamped, fills internal cpts data
  666. * in skb-cb and marks packet as SKBTX_IN_PROGRESS.
  667. */
  668. void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
  669. {
  670. struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
  671. int ret;
  672. if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
  673. return;
  674. ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
  675. if (!ret)
  676. return;
  677. skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
  678. AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
  679. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  680. }
  681. EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
/* Return the PHC index of this CPTS instance's registered PTP clock. */
int am65_cpts_phc_index(struct am65_cpts *cpts)
{
	return cpts->phc_index;
}
EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
/* devm action: tear down the refclk mux clock provider registered by
 * cpts_of_mux_clk_setup().  Order matters: drop the OF provider before
 * unregistering the mux, then release the node reference.
 */
static void cpts_free_clk_mux(void *data)
{
	struct am65_cpts *cpts = data;

	of_clk_del_provider(cpts->clk_mux_np);
	clk_hw_unregister_mux(cpts->clk_mux_hw);
	of_node_put(cpts->clk_mux_np);
}
  694. static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
  695. struct device_node *node)
  696. {
  697. unsigned int num_parents;
  698. const char **parent_names;
  699. char *clk_mux_name;
  700. void __iomem *reg;
  701. int ret = -EINVAL;
  702. cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
  703. if (!cpts->clk_mux_np)
  704. return 0;
  705. num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
  706. if (num_parents < 1) {
  707. dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
  708. cpts->clk_mux_np);
  709. goto mux_fail;
  710. }
  711. parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
  712. GFP_KERNEL);
  713. if (!parent_names) {
  714. ret = -ENOMEM;
  715. goto mux_fail;
  716. }
  717. of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
  718. clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
  719. dev_name(cpts->dev), cpts->clk_mux_np);
  720. if (!clk_mux_name) {
  721. ret = -ENOMEM;
  722. goto mux_fail;
  723. }
  724. reg = &cpts->reg->rftclk_sel;
  725. /* dev must be NULL to avoid recursive incrementing
  726. * of module refcnt
  727. */
  728. cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
  729. parent_names, num_parents,
  730. 0, reg, 0, 5, 0, NULL);
  731. if (IS_ERR(cpts->clk_mux_hw)) {
  732. ret = PTR_ERR(cpts->clk_mux_hw);
  733. goto mux_fail;
  734. }
  735. ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
  736. cpts->clk_mux_hw);
  737. if (ret)
  738. goto clk_hw_register;
  739. ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
  740. if (ret)
  741. dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
  742. return ret;
  743. clk_hw_register:
  744. clk_hw_unregister_mux(cpts->clk_mux_hw);
  745. mux_fail:
  746. of_node_put(cpts->clk_mux_np);
  747. return ret;
  748. }
  749. static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
  750. {
  751. u32 prop[2];
  752. if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
  753. cpts->ext_ts_inputs = prop[0];
  754. if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
  755. cpts->genf_num = prop[0];
  756. return cpts_of_mux_clk_setup(cpts, node);
  757. }
/* Tear down a CPTS instance created by am65_cpts_create().  Order
 * matters: unregister the PTP clock first so no callbacks can run, then
 * stop the hardware, then release the reference clock.
 */
void am65_cpts_release(struct am65_cpts *cpts)
{
	ptp_clock_unregister(cpts->ptp_clock);
	am65_cpts_disable(cpts);
	clk_disable_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(am65_cpts_release);
  765. struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
  766. struct device_node *node)
  767. {
  768. struct am65_cpts *cpts;
  769. int ret, i;
  770. cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
  771. if (!cpts)
  772. return ERR_PTR(-ENOMEM);
  773. cpts->dev = dev;
  774. cpts->reg = (struct am65_cpts_regs __iomem *)regs;
  775. cpts->irq = of_irq_get_byname(node, "cpts");
  776. if (cpts->irq <= 0) {
  777. ret = cpts->irq ?: -ENXIO;
  778. dev_err_probe(dev, ret, "Failed to get IRQ number\n");
  779. return ERR_PTR(ret);
  780. }
  781. ret = am65_cpts_of_parse(cpts, node);
  782. if (ret)
  783. return ERR_PTR(ret);
  784. mutex_init(&cpts->ptp_clk_lock);
  785. INIT_LIST_HEAD(&cpts->events);
  786. INIT_LIST_HEAD(&cpts->pool);
  787. spin_lock_init(&cpts->lock);
  788. skb_queue_head_init(&cpts->txq);
  789. for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
  790. list_add(&cpts->pool_data[i].list, &cpts->pool);
  791. cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
  792. if (IS_ERR(cpts->refclk)) {
  793. ret = PTR_ERR(cpts->refclk);
  794. dev_err_probe(dev, ret, "Failed to get refclk\n");
  795. return ERR_PTR(ret);
  796. }
  797. ret = clk_prepare_enable(cpts->refclk);
  798. if (ret) {
  799. dev_err(dev, "Failed to enable refclk %d\n", ret);
  800. return ERR_PTR(ret);
  801. }
  802. cpts->refclk_freq = clk_get_rate(cpts->refclk);
  803. am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
  804. cpts->ptp_info = am65_ptp_info;
  805. if (cpts->ext_ts_inputs)
  806. cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
  807. if (cpts->genf_num)
  808. cpts->ptp_info.n_per_out = cpts->genf_num;
  809. am65_cpts_set_add_val(cpts);
  810. am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
  811. AM65_CPTS_CONTROL_64MODE |
  812. AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
  813. control);
  814. am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
  815. /* set time to the current system time */
  816. am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
  817. cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
  818. if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
  819. dev_err(dev, "Failed to register ptp clk %ld\n",
  820. PTR_ERR(cpts->ptp_clock));
  821. ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
  822. goto refclk_disable;
  823. }
  824. cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
  825. ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
  826. am65_cpts_interrupt,
  827. IRQF_ONESHOT, dev_name(dev), cpts);
  828. if (ret < 0) {
  829. dev_err(cpts->dev, "error attaching irq %d\n", ret);
  830. goto reset_ptpclk;
  831. }
  832. dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
  833. am65_cpts_read32(cpts, idver),
  834. cpts->refclk_freq, cpts->ts_add_val);
  835. return cpts;
  836. reset_ptpclk:
  837. am65_cpts_release(cpts);
  838. refclk_disable:
  839. clk_disable_unprepare(cpts->refclk);
  840. return ERR_PTR(ret);
  841. }
  842. EXPORT_SYMBOL_GPL(am65_cpts_create);
  843. static int am65_cpts_probe(struct platform_device *pdev)
  844. {
  845. struct device_node *node = pdev->dev.of_node;
  846. struct device *dev = &pdev->dev;
  847. struct am65_cpts *cpts;
  848. void __iomem *base;
  849. base = devm_platform_ioremap_resource_byname(pdev, "cpts");
  850. if (IS_ERR(base))
  851. return PTR_ERR(base);
  852. cpts = am65_cpts_create(dev, base, node);
  853. return PTR_ERR_OR_ZERO(cpts);
  854. }
/* Devicetree compatibles handled by this driver (AM65x and J721E SoCs) */
static const struct of_device_id am65_cpts_of_match[] = {
	{ .compatible = "ti,am65-cpts", },
	{ .compatible = "ti,j721e-cpts", },
	{},
};
MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
/* Platform driver glue; no explicit .remove — all resources in the
 * probe path are devm-managed or torn down via am65_cpts_release()
 */
static struct platform_driver am65_cpts_driver = {
	.probe = am65_cpts_probe,
	.driver = {
		.name = "am65-cpts",
		.of_match_table = am65_cpts_of_match,
	},
};

module_platform_driver(am65_cpts_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <[email protected]>");
MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");