// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2017 Broadcom

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>

#define DTE_NCO_LOW_TIME_REG 0x00
#define DTE_NCO_TIME_REG 0x04
#define DTE_NCO_OVERFLOW_REG 0x08
#define DTE_NCO_INC_REG 0x0c

#define DTE_NCO_SUM2_MASK 0xffffffff
#define DTE_NCO_SUM2_SHIFT 4ULL

#define DTE_NCO_SUM3_MASK 0xff
#define DTE_NCO_SUM3_SHIFT 36ULL
#define DTE_NCO_SUM3_WR_SHIFT 8

#define DTE_NCO_TS_WRAP_MASK 0xfff
#define DTE_NCO_TS_WRAP_LSHIFT 32

#define DTE_NCO_INC_DEFAULT 0x80000000
#define DTE_NUM_REGS_TO_RESTORE 4

/* Full wrap around is 44bits in ns (~4.887 hrs) */
#define DTE_WRAP_AROUND_NSEC_SHIFT 44

/* 44 bits NCO */
#define DTE_NCO_MAX_NS 0xFFFFFFFFFFFLL

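/*
 * NCO increment units (inferred from the constants in this file): at 125 MHz
 * the nominal step is 8 ns per tick and the default increment is 0x80000000,
 * which suggests one LSB of the increment register corresponds to 2^-28 ns.
 * DTE_PPB_ADJ() scales abs(ppb) by 2^28 / 125000000 with round-to-nearest
 * (the + 62500000 term is half the divisor), i.e. the increment delta that
 * shifts the clock rate by abs(ppb) parts per billion.
 */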
/* 125MHz with 3.29 reg cfg */
#define DTE_PPB_ADJ(ppb) (u32)(div64_u64((((u64)abs(ppb) * BIT(28)) +\
			62500000ULL), 125000000ULL))

/* ptp dte priv structure */
struct ptp_dte {
	void __iomem *regs;
	struct ptp_clock *ptp_clk;
	struct ptp_clock_info caps;
	struct device *dev;
	u32 ts_ovf_last;	/* last observed overflow bits, for wrap detection */
	u32 ts_wrap_cnt;	/* number of 44-bit NCO wraparounds seen */
	spinlock_t lock;
	u32 reg_val[DTE_NUM_REGS_TO_RESTORE];	/* register snapshot for suspend/resume */
};

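/*
 * The NCO accumulator holds 44 bits of nanoseconds split across three
 * registers (layout inferred from the shift/mask definitions above):
 * sum1 = bits [3:0] (sub-16 ns, ignored here), sum2 = bits [35:4],
 * sum3 = bits [43:36].  sum3 is read back at bits [7:0] of the overflow
 * register but must be written at bits [15:8].
 */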
static void dte_write_nco(void __iomem *regs, s64 ns)
{
	u32 sum2, sum3;

	sum2 = (u32)((ns >> DTE_NCO_SUM2_SHIFT) & DTE_NCO_SUM2_MASK);
	/* compensate for ignoring sum1 */
	if (sum2 != DTE_NCO_SUM2_MASK)
		sum2++;

	/* to write sum3, bits [15:8] need to be written */
	sum3 = (u32)(((ns >> DTE_NCO_SUM3_SHIFT) & DTE_NCO_SUM3_MASK) <<
		     DTE_NCO_SUM3_WR_SHIFT);

	writel(0, (regs + DTE_NCO_LOW_TIME_REG));
	writel(sum2, (regs + DTE_NCO_TIME_REG));
	writel(sum3, (regs + DTE_NCO_OVERFLOW_REG));
}

static s64 dte_read_nco(void __iomem *regs)
{
	u32 sum2, sum3;
	s64 ns;

	/*
	 * ignoring sum1 (4 bits) gives a 16ns resolution, which
	 * works due to the async register read.
	 */
	sum3 = readl(regs + DTE_NCO_OVERFLOW_REG) & DTE_NCO_SUM3_MASK;
	sum2 = readl(regs + DTE_NCO_TIME_REG);
	ns = ((s64)sum3 << DTE_NCO_SUM3_SHIFT) |
	     ((s64)sum2 << DTE_NCO_SUM2_SHIFT);

	return ns;
}

static void dte_write_nco_delta(struct ptp_dte *ptp_dte, s64 delta)
{
	s64 ns;

	ns = dte_read_nco(ptp_dte->regs);

	/* handle wraparound conditions */
	if ((delta < 0) && (abs(delta) > ns)) {
		if (ptp_dte->ts_wrap_cnt) {
			ns += DTE_NCO_MAX_NS + delta;
			ptp_dte->ts_wrap_cnt--;
		} else {
			ns = 0;
		}
	} else {
		ns += delta;
		if (ns > DTE_NCO_MAX_NS) {
			ptp_dte->ts_wrap_cnt++;
			ns -= DTE_NCO_MAX_NS;
		}
	}

	dte_write_nco(ptp_dte->regs, ns);

	ptp_dte->ts_ovf_last = (ns >> DTE_NCO_TS_WRAP_LSHIFT) &
			DTE_NCO_TS_WRAP_MASK;
}

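/*
 * The hardware counter only covers 44 bits (~4.887 hrs), so software keeps
 * ts_wrap_cnt as an extension: the top 12 bits of the 44-bit value (the
 * 8 bits of sum3 plus the 4 MSBs of sum2) are cached in ts_ovf_last, and a
 * decrease between reads is counted as one wraparound.
 */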
static s64 dte_read_nco_with_ovf(struct ptp_dte *ptp_dte)
{
	u32 ts_ovf;
	s64 ns = 0;

	ns = dte_read_nco(ptp_dte->regs);

	/* Timestamp overflow: 8 LSB bits of sum3, 4 MSB bits of sum2 */
	ts_ovf = (ns >> DTE_NCO_TS_WRAP_LSHIFT) & DTE_NCO_TS_WRAP_MASK;

	/* Check for wrap around */
	if (ts_ovf < ptp_dte->ts_ovf_last)
		ptp_dte->ts_wrap_cnt++;

	ptp_dte->ts_ovf_last = ts_ovf;

	/* adjust for wraparounds */
	ns += (s64)(BIT_ULL(DTE_WRAP_AROUND_NSEC_SHIFT) * ptp_dte->ts_wrap_cnt);

	return ns;
}

static int ptp_dte_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	u32 nco_incr;
	unsigned long flags;
	struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);

	if (abs(ppb) > ptp_dte->caps.max_adj) {
		dev_err(ptp_dte->dev, "ppb adj too big\n");
		return -EINVAL;
	}

	if (ppb < 0)
		nco_incr = DTE_NCO_INC_DEFAULT - DTE_PPB_ADJ(ppb);
	else
		nco_incr = DTE_NCO_INC_DEFAULT + DTE_PPB_ADJ(ppb);

	spin_lock_irqsave(&ptp_dte->lock, flags);
	writel(nco_incr, ptp_dte->regs + DTE_NCO_INC_REG);
	spin_unlock_irqrestore(&ptp_dte->lock, flags);

	return 0;
}

static int ptp_dte_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);

	spin_lock_irqsave(&ptp_dte->lock, flags);
	dte_write_nco_delta(ptp_dte, delta);
	spin_unlock_irqrestore(&ptp_dte->lock, flags);

	return 0;
}

static int ptp_dte_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	unsigned long flags;
	struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);

	spin_lock_irqsave(&ptp_dte->lock, flags);
	*ts = ns_to_timespec64(dte_read_nco_with_ovf(ptp_dte));
	spin_unlock_irqrestore(&ptp_dte->lock, flags);

	return 0;
}

static int ptp_dte_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	unsigned long flags;
	struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps);

	spin_lock_irqsave(&ptp_dte->lock, flags);

	/* Disable nco increment */
	writel(0, ptp_dte->regs + DTE_NCO_INC_REG);

	dte_write_nco(ptp_dte->regs, timespec64_to_ns(ts));

	/* reset overflow and wrap counter */
	ptp_dte->ts_ovf_last = 0;
	ptp_dte->ts_wrap_cnt = 0;

	/* Enable nco increment */
	writel(DTE_NCO_INC_DEFAULT, ptp_dte->regs + DTE_NCO_INC_REG);

	spin_unlock_irqrestore(&ptp_dte->lock, flags);

	return 0;
}

static int ptp_dte_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

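/* max_adj is expressed in ppb, so 50000000 permits offsets of up to +/-5% */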
static const struct ptp_clock_info ptp_dte_caps = {
	.owner		= THIS_MODULE,
	.name		= "DTE PTP timer",
	.max_adj	= 50000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= ptp_dte_adjfreq,
	.adjtime	= ptp_dte_adjtime,
	.gettime64	= ptp_dte_gettime,
	.settime64	= ptp_dte_settime,
	.enable		= ptp_dte_enable,
};

static int ptp_dte_probe(struct platform_device *pdev)
{
	struct ptp_dte *ptp_dte;
	struct device *dev = &pdev->dev;

	ptp_dte = devm_kzalloc(dev, sizeof(struct ptp_dte), GFP_KERNEL);
	if (!ptp_dte)
		return -ENOMEM;

	ptp_dte->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ptp_dte->regs))
		return PTR_ERR(ptp_dte->regs);

	spin_lock_init(&ptp_dte->lock);

	ptp_dte->dev = dev;
	ptp_dte->caps = ptp_dte_caps;
	ptp_dte->ptp_clk = ptp_clock_register(&ptp_dte->caps, &pdev->dev);
	if (IS_ERR(ptp_dte->ptp_clk)) {
		dev_err(dev,
			"%s: Failed to register ptp clock\n", __func__);
		return PTR_ERR(ptp_dte->ptp_clk);
	}

	platform_set_drvdata(pdev, ptp_dte);

	dev_info(dev, "ptp clk probe done\n");

	return 0;
}

static int ptp_dte_remove(struct platform_device *pdev)
{
	struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
	u8 i;

	ptp_clock_unregister(ptp_dte->ptp_clk);

	for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++)
		writel(0, ptp_dte->regs + (i * sizeof(u32)));

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ptp_dte_suspend(struct device *dev)
{
	struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
	u8 i;

	for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
		ptp_dte->reg_val[i] =
			readl(ptp_dte->regs + (i * sizeof(u32)));
	}

	/* disable the nco */
	writel(0, ptp_dte->regs + DTE_NCO_INC_REG);

	return 0;
}

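/*
 * On resume the overflow (sum3) register needs special handling: its value
 * was captured from the read position (bits [7:0]) but must be restored at
 * the write position (bits [15:8]), hence the shift below.
 */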
static int ptp_dte_resume(struct device *dev)
{
	struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
	u8 i;

	for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
		if ((i * sizeof(u32)) != DTE_NCO_OVERFLOW_REG)
			writel(ptp_dte->reg_val[i],
			       (ptp_dte->regs + (i * sizeof(u32))));
		else
			writel(((ptp_dte->reg_val[i] &
				 DTE_NCO_SUM3_MASK) << DTE_NCO_SUM3_WR_SHIFT),
			       (ptp_dte->regs + (i * sizeof(u32))));
	}

	return 0;
}

static const struct dev_pm_ops ptp_dte_pm_ops = {
	.suspend = ptp_dte_suspend,
	.resume = ptp_dte_resume
};

#define PTP_DTE_PM_OPS (&ptp_dte_pm_ops)
#else
#define PTP_DTE_PM_OPS NULL
#endif

static const struct of_device_id ptp_dte_of_match[] = {
	{ .compatible = "brcm,ptp-dte", },
	{},
};
MODULE_DEVICE_TABLE(of, ptp_dte_of_match);

static struct platform_driver ptp_dte_driver = {
	.driver = {
		.name = "ptp-dte",
		.pm = PTP_DTE_PM_OPS,
		.of_match_table = ptp_dte_of_match,
	},
	.probe = ptp_dte_probe,
	.remove = ptp_dte_remove,
};
module_platform_driver(ptp_dte_driver);

MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("Broadcom DTE PTP Clock driver");
MODULE_LICENSE("GPL v2");