intel-qep.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <[email protected]>
 * Author: Raymond Tan <[email protected]>
 */

#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#define INTEL_QEPCON 0x00
#define INTEL_QEPFLT 0x04
#define INTEL_QEPCOUNT 0x08
#define INTEL_QEPMAX 0x0c
#define INTEL_QEPWDT 0x10
#define INTEL_QEPCAPDIV 0x14
#define INTEL_QEPCNTR 0x18
#define INTEL_QEPCAPBUF 0x1c
#define INTEL_QEPINT_STAT 0x20
#define INTEL_QEPINT_MASK 0x24

/* QEPCON */
#define INTEL_QEPCON_EN BIT(0)
#define INTEL_QEPCON_FLT_EN BIT(1)
#define INTEL_QEPCON_EDGE_A BIT(2)
#define INTEL_QEPCON_EDGE_B BIT(3)
#define INTEL_QEPCON_EDGE_INDX BIT(4)
#define INTEL_QEPCON_SWPAB BIT(5)
#define INTEL_QEPCON_OP_MODE BIT(6)
#define INTEL_QEPCON_PH_ERR BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n) (((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n) ((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY BIT(15)

/* QEPFLT */
#define INTEL_QEPFLT_MAX_COUNT(n) ((n) & 0x1fffff)

/* QEPINT */
#define INTEL_QEPINT_FIFOCRIT BIT(5)
#define INTEL_QEPINT_FIFOENTRY BIT(4)
#define INTEL_QEPINT_QEPDIR BIT(3)
#define INTEL_QEPINT_QEPRST_UP BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN BIT(1)
#define INTEL_QEPINT_WDT BIT(0)

#define INTEL_QEPINT_MASK_ALL GENMASK(5, 0)

#define INTEL_QEP_CLK_PERIOD_NS 10

struct intel_qep {
        struct mutex lock;
        struct device *dev;
        void __iomem *regs;
        bool enabled;
        /* Context save registers */
        u32 qepcon;
        u32 qepflt;
        u32 qepmax;
};

static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
{
        return readl(qep->regs + offset);
}

static inline void intel_qep_writel(struct intel_qep *qep,
                                    u32 offset, u32 value)
{
        writel(value, qep->regs + offset);
}
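
/*
 * Bring the peripheral into a known state: clear OP_MODE and FLT_EN,
 * set the EDGE_A/EDGE_B/EDGE_INDX and COUNT_RST_MODE control bits and
 * mask all interrupt sources, leaving the counter itself disabled.
 */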
static void intel_qep_init(struct intel_qep *qep)
{
        u32 reg;

        reg = intel_qep_readl(qep, INTEL_QEPCON);
        reg &= ~INTEL_QEPCON_EN;
        intel_qep_writel(qep, INTEL_QEPCON, reg);
        qep->enabled = false;
        /*
         * Make sure peripheral is disabled by flushing the write with
         * a dummy read
         */
        reg = intel_qep_readl(qep, INTEL_QEPCON);

        reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
        reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
               INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
        intel_qep_writel(qep, INTEL_QEPCON, reg);
        intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}
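
/*
 * Counter subsystem callbacks. The hardware provides a single
 * quadrature channel, so the count function (quadrature x4) and the
 * synapse action (both edges) are fixed and read-only.
 */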
static int intel_qep_count_read(struct counter_device *counter,
                                struct counter_count *count, u64 *val)
{
        struct intel_qep *const qep = counter_priv(counter);

        pm_runtime_get_sync(qep->dev);
        *val = intel_qep_readl(qep, INTEL_QEPCOUNT);
        pm_runtime_put(qep->dev);

        return 0;
}

static const enum counter_function intel_qep_count_functions[] = {
        COUNTER_FUNCTION_QUADRATURE_X4,
};

static int intel_qep_function_read(struct counter_device *counter,
                                   struct counter_count *count,
                                   enum counter_function *function)
{
        *function = COUNTER_FUNCTION_QUADRATURE_X4;

        return 0;
}

static const enum counter_synapse_action intel_qep_synapse_actions[] = {
        COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};

static int intel_qep_action_read(struct counter_device *counter,
                                 struct counter_count *count,
                                 struct counter_synapse *synapse,
                                 enum counter_synapse_action *action)
{
        *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;

        return 0;
}

static const struct counter_ops intel_qep_counter_ops = {
        .count_read = intel_qep_count_read,
        .function_read = intel_qep_function_read,
        .action_read = intel_qep_action_read,
};

#define INTEL_QEP_SIGNAL(_id, _name) { \
        .id = (_id), \
        .name = (_name), \
}

static struct counter_signal intel_qep_signals[] = {
        INTEL_QEP_SIGNAL(0, "Phase A"),
        INTEL_QEP_SIGNAL(1, "Phase B"),
        INTEL_QEP_SIGNAL(2, "Index"),
};

#define INTEL_QEP_SYNAPSE(_signal_id) { \
        .actions_list = intel_qep_synapse_actions, \
        .num_actions = ARRAY_SIZE(intel_qep_synapse_actions), \
        .signal = &intel_qep_signals[(_signal_id)], \
}

static struct counter_synapse intel_qep_count_synapses[] = {
        INTEL_QEP_SYNAPSE(0),
        INTEL_QEP_SYNAPSE(1),
        INTEL_QEP_SYNAPSE(2),
};

static int intel_qep_ceiling_read(struct counter_device *counter,
                                  struct counter_count *count, u64 *ceiling)
{
        struct intel_qep *qep = counter_priv(counter);

        pm_runtime_get_sync(qep->dev);
        *ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
        pm_runtime_put(qep->dev);

        return 0;
}
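
/*
 * The ceiling lives in the 32-bit QEPMAX register and can only be
 * changed while the counter is disabled.
 */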
static int intel_qep_ceiling_write(struct counter_device *counter,
                                   struct counter_count *count, u64 max)
{
        struct intel_qep *qep = counter_priv(counter);
        int ret = 0;

        /* Intel QEP ceiling configuration only supports 32-bit values */
        if (max != (u32)max)
                return -ERANGE;

        mutex_lock(&qep->lock);
        if (qep->enabled) {
                ret = -EBUSY;
                goto out;
        }

        pm_runtime_get_sync(qep->dev);
        intel_qep_writel(qep, INTEL_QEPMAX, max);
        pm_runtime_put(qep->dev);

out:
        mutex_unlock(&qep->lock);
        return ret;
}

static int intel_qep_enable_read(struct counter_device *counter,
                                 struct counter_count *count, u8 *enable)
{
        struct intel_qep *qep = counter_priv(counter);

        *enable = qep->enabled;

        return 0;
}
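
/*
 * Enabling the counter takes an extra runtime PM reference
 * (pm_runtime_get_noresume()) so the device stays powered while it is
 * counting; disabling drops that reference again via
 * pm_runtime_put_noidle(). The get_sync()/put() pair only brackets the
 * register access itself.
 */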
static int intel_qep_enable_write(struct counter_device *counter,
                                  struct counter_count *count, u8 val)
{
        struct intel_qep *qep = counter_priv(counter);
        u32 reg;
        bool changed;

        mutex_lock(&qep->lock);
        changed = val ^ qep->enabled;
        if (!changed)
                goto out;

        pm_runtime_get_sync(qep->dev);
        reg = intel_qep_readl(qep, INTEL_QEPCON);
        if (val) {
                /* Enable peripheral and keep runtime PM always on */
                reg |= INTEL_QEPCON_EN;
                pm_runtime_get_noresume(qep->dev);
        } else {
                /* Let runtime PM be idle and disable peripheral */
                pm_runtime_put_noidle(qep->dev);
                reg &= ~INTEL_QEPCON_EN;
        }
        intel_qep_writel(qep, INTEL_QEPCON, reg);
        pm_runtime_put(qep->dev);
        qep->enabled = val;

out:
        mutex_unlock(&qep->lock);
        return 0;
}
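
/*
 * The noise (spike) filter length is (QEPFLT.MAX_COUNT + 2) periods of
 * the 10 ns reference clock, exposed to userspace in nanoseconds via
 * the spike_filter_ns extension; writing 0 disables the filter, and
 * the length can only be changed while the counter is disabled.
 */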
static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
                                          struct counter_count *count,
                                          u64 *length)
{
        struct intel_qep *qep = counter_priv(counter);
        u32 reg;

        pm_runtime_get_sync(qep->dev);
        reg = intel_qep_readl(qep, INTEL_QEPCON);
        if (!(reg & INTEL_QEPCON_FLT_EN)) {
                pm_runtime_put(qep->dev);
                return 0;
        }
        reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
        pm_runtime_put(qep->dev);

        *length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;

        return 0;
}

static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
                                           struct counter_count *count,
                                           u64 length)
{
        struct intel_qep *qep = counter_priv(counter);
        u32 reg;
        bool enable;
        int ret = 0;

        /*
         * Spike filter length is (MAX_COUNT + 2) clock periods.
         * Disable filter when userspace writes 0, enable for valid
         * nanoseconds values and error out otherwise.
         */
        do_div(length, INTEL_QEP_CLK_PERIOD_NS);
        if (length == 0) {
                enable = false;
                length = 0;
        } else if (length >= 2) {
                enable = true;
                length -= 2;
        } else {
                return -EINVAL;
        }

        if (length > INTEL_QEPFLT_MAX_COUNT(length))
                return -ERANGE;

        mutex_lock(&qep->lock);
        if (qep->enabled) {
                ret = -EBUSY;
                goto out;
        }

        pm_runtime_get_sync(qep->dev);
        reg = intel_qep_readl(qep, INTEL_QEPCON);
        if (enable)
                reg |= INTEL_QEPCON_FLT_EN;
        else
                reg &= ~INTEL_QEPCON_FLT_EN;
        intel_qep_writel(qep, INTEL_QEPFLT, length);
        intel_qep_writel(qep, INTEL_QEPCON, reg);
        pm_runtime_put(qep->dev);

out:
        mutex_unlock(&qep->lock);
        return ret;
}
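
/*
 * preset_enable is the inverse of the QEPCON COUNT_RST_MODE bit and,
 * like the ceiling and filter, can only be changed while the counter
 * is disabled.
 */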
static int intel_qep_preset_enable_read(struct counter_device *counter,
                                        struct counter_count *count,
                                        u8 *preset_enable)
{
        struct intel_qep *qep = counter_priv(counter);
        u32 reg;

        pm_runtime_get_sync(qep->dev);
        reg = intel_qep_readl(qep, INTEL_QEPCON);
        pm_runtime_put(qep->dev);

        *preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);

        return 0;
}

static int intel_qep_preset_enable_write(struct counter_device *counter,
                                         struct counter_count *count, u8 val)
{
        struct intel_qep *qep = counter_priv(counter);
        u32 reg;
        int ret = 0;

        mutex_lock(&qep->lock);
        if (qep->enabled) {
                ret = -EBUSY;
                goto out;
        }

        pm_runtime_get_sync(qep->dev);
        reg = intel_qep_readl(qep, INTEL_QEPCON);
        if (val)
                reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
        else
                reg |= INTEL_QEPCON_COUNT_RST_MODE;

        intel_qep_writel(qep, INTEL_QEPCON, reg);
        pm_runtime_put(qep->dev);

out:
        mutex_unlock(&qep->lock);

        return ret;
}

static struct counter_comp intel_qep_count_ext[] = {
        COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
        COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
        COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
                                   intel_qep_preset_enable_write),
        COUNTER_COMP_COUNT_U64("spike_filter_ns",
                               intel_qep_spike_filter_ns_read,
                               intel_qep_spike_filter_ns_write),
};

static struct counter_count intel_qep_counter_count[] = {
        {
                .id = 0,
                .name = "Channel 1 Count",
                .functions_list = intel_qep_count_functions,
                .num_functions = ARRAY_SIZE(intel_qep_count_functions),
                .synapses = intel_qep_count_synapses,
                .num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
                .ext = intel_qep_count_ext,
                .num_ext = ARRAY_SIZE(intel_qep_count_ext),
        },
};
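
/*
 * Illustrative userspace usage via the generic counter sysfs ABI (the
 * counterX instance number is assigned at registration time and may
 * differ on a given system):
 *
 *   # configure a ~250 ns glitch filter while the counter is disabled
 *   echo 250 > /sys/bus/counter/devices/counterX/count0/spike_filter_ns
 *   echo 1 > /sys/bus/counter/devices/counterX/count0/enable
 *   cat /sys/bus/counter/devices/counterX/count0/count
 */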
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
        struct counter_device *counter;
        struct intel_qep *qep;
        struct device *dev = &pci->dev;
        void __iomem *regs;
        int ret;

        counter = devm_counter_alloc(dev, sizeof(*qep));
        if (!counter)
                return -ENOMEM;
        qep = counter_priv(counter);

        ret = pcim_enable_device(pci);
        if (ret)
                return ret;

        pci_set_master(pci);

        ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
        if (ret)
                return ret;

        regs = pcim_iomap_table(pci)[0];
        if (!regs)
                return -ENOMEM;

        qep->dev = dev;
        qep->regs = regs;
        mutex_init(&qep->lock);

        intel_qep_init(qep);
        pci_set_drvdata(pci, qep);

        counter->name = pci_name(pci);
        counter->parent = dev;
        counter->ops = &intel_qep_counter_ops;
        counter->counts = intel_qep_counter_count;
        counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
        counter->signals = intel_qep_signals;
        counter->num_signals = ARRAY_SIZE(intel_qep_signals);
        qep->enabled = false;

        pm_runtime_put(dev);
        pm_runtime_allow(dev);

        ret = devm_counter_add(&pci->dev, counter);
        if (ret < 0)
                return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");

        return 0;
}
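
/*
 * devm takes care of unmapping and of removing the counter device;
 * here we only forbid runtime PM, make sure the device is powered (it
 * already is while the counter is enabled) and disable the peripheral.
 */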
static void intel_qep_remove(struct pci_dev *pci)
{
        struct intel_qep *qep = pci_get_drvdata(pci);
        struct device *dev = &pci->dev;

        pm_runtime_forbid(dev);
        if (!qep->enabled)
                pm_runtime_get(dev);

        intel_qep_writel(qep, INTEL_QEPCON, 0);
}
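
/* Save and restore QEPCON/QEPFLT/QEPMAX across suspend/resume. */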
static int __maybe_unused intel_qep_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_qep *qep = pci_get_drvdata(pdev);

        qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
        qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
        qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);

        return 0;
}

static int __maybe_unused intel_qep_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct intel_qep *qep = pci_get_drvdata(pdev);

        /*
         * Make sure peripheral is disabled when restoring registers and
         * control register bits that are writable only when the peripheral
         * is disabled
         */
        intel_qep_writel(qep, INTEL_QEPCON, 0);
        intel_qep_readl(qep, INTEL_QEPCON);

        intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
        intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
        intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);

        /* Restore all other control register bits except enable status */
        intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
        intel_qep_readl(qep, INTEL_QEPCON);

        /* Restore enable status */
        intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);

        return 0;
}

static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
                            intel_qep_suspend, intel_qep_resume, NULL);

static const struct pci_device_id intel_qep_id_table[] = {
        /* EHL */
        { PCI_VDEVICE(INTEL, 0x4bc3), },
        { PCI_VDEVICE(INTEL, 0x4b81), },
        { PCI_VDEVICE(INTEL, 0x4b82), },
        { PCI_VDEVICE(INTEL, 0x4b83), },
        { } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);

static struct pci_driver intel_qep_driver = {
        .name = "intel-qep",
        .id_table = intel_qep_id_table,
        .probe = intel_qep_probe,
        .remove = intel_qep_remove,
        .driver = {
                .pm = &intel_qep_pm_ops,
        }
};

module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <[email protected]>");
MODULE_AUTHOR("Raymond Tan <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
MODULE_IMPORT_NS(COUNTER);