// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP IOMMU quirks for various TI SoCs
 *
 * Copyright (C) 2015-2019 Texas Instruments Incorporated - https://www.ti.com/
 * Suman Anna <[email protected]>
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "common.h"
/*
 * struct pwrdm_link - cached device-to-powerdomain lookup entry
 * @dev:   IOMMU platform device the powerdomain was resolved for
 * @pwrdm: powerdomain backing @dev's functional clock
 * @node:  list head for chaining the entry into the static lookup cache
 *         maintained inside _get_pwrdm()
 */
struct pwrdm_link {
	struct device *dev;
	struct powerdomain *pwrdm;
	struct list_head node;
};
/* protects the pwrdm_link cache list and the emu_clkdm idle transitions */
static DEFINE_SPINLOCK(iommu_lock);
/* EMU clockdomain; resolved lazily on first DRA7 DSP IOMMU configuration */
static struct clockdomain *emu_clkdm;
/* number of DRA7 DSP IOMMUs currently requiring emu_clkdm to stay active */
static atomic_t emu_count;
  23. static void omap_iommu_dra7_emu_swsup_config(struct platform_device *pdev,
  24. bool enable)
  25. {
  26. struct device_node *np = pdev->dev.of_node;
  27. unsigned long flags;
  28. if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
  29. return;
  30. if (!emu_clkdm) {
  31. emu_clkdm = clkdm_lookup("emu_clkdm");
  32. if (WARN_ON_ONCE(!emu_clkdm))
  33. return;
  34. }
  35. spin_lock_irqsave(&iommu_lock, flags);
  36. if (enable && (atomic_inc_return(&emu_count) == 1))
  37. clkdm_deny_idle(emu_clkdm);
  38. else if (!enable && (atomic_dec_return(&emu_count) == 0))
  39. clkdm_allow_idle(emu_clkdm);
  40. spin_unlock_irqrestore(&iommu_lock, flags);
  41. }
/*
 * _get_pwrdm - resolve (and cache) the powerdomain for an IOMMU device
 * @dev: IOMMU platform device
 *
 * Resolves the powerdomain by taking the first clock of @dev's parent DT
 * node, mapping it to its OMAP clockdomain and from there to the owning
 * powerdomain. Successful lookups are cached in a static list so repeat
 * calls skip the clock-framework walk.
 *
 * Return: the powerdomain on success, NULL on any lookup failure.
 */
static struct powerdomain *_get_pwrdm(struct device *dev)
{
	struct clk *clk;
	struct clk_hw_omap *hwclk;
	struct clockdomain *clkdm;
	struct powerdomain *pwrdm = NULL;
	struct pwrdm_link *entry;
	unsigned long flags;
	/* process-lifetime cache; entries are never freed */
	static LIST_HEAD(cache);

	/* fast path: previously resolved device */
	spin_lock_irqsave(&iommu_lock, flags);
	list_for_each_entry(entry, &cache, node) {
		if (entry->dev == dev) {
			pwrdm = entry->pwrdm;
			break;
		}
	}
	spin_unlock_irqrestore(&iommu_lock, flags);

	if (pwrdm)
		return pwrdm;

	/* the functional clock sits on the parent (hwmod) DT node */
	clk = of_clk_get(dev->of_node->parent, 0);
	if (IS_ERR(clk)) {
		dev_err(dev, "no fck found\n");
		return NULL;
	}

	/* only the clk_hw wrapper is needed past this point; drop the ref */
	hwclk = to_clk_hw_omap(__clk_get_hw(clk));
	clk_put(clk);
	if (!hwclk || !hwclk->clkdm_name) {
		dev_err(dev, "no hwclk data\n");
		return NULL;
	}

	clkdm = clkdm_lookup(hwclk->clkdm_name);
	if (!clkdm) {
		dev_err(dev, "clkdm not found: %s\n", hwclk->clkdm_name);
		return NULL;
	}

	pwrdm = clkdm_get_pwrdm(clkdm);
	if (!pwrdm) {
		dev_err(dev, "pwrdm not found: %s\n", clkdm->name);
		return NULL;
	}

	/*
	 * Best-effort caching: allocate outside the spinlock (GFP_KERNEL
	 * may sleep); on allocation failure just return the uncached
	 * result and let a later call try again.
	 */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->dev = dev;
		entry->pwrdm = pwrdm;
		spin_lock_irqsave(&iommu_lock, flags);
		list_add(&entry->node, &cache);
		spin_unlock_irqrestore(&iommu_lock, flags);
	}

	return pwrdm;
}
  92. int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
  93. u8 *pwrst)
  94. {
  95. struct powerdomain *pwrdm;
  96. u8 next_pwrst;
  97. int ret = 0;
  98. pwrdm = _get_pwrdm(&pdev->dev);
  99. if (!pwrdm)
  100. return -ENODEV;
  101. if (request) {
  102. *pwrst = pwrdm_read_next_pwrst(pwrdm);
  103. omap_iommu_dra7_emu_swsup_config(pdev, true);
  104. }
  105. if (*pwrst > PWRDM_POWER_RET)
  106. goto out;
  107. next_pwrst = request ? PWRDM_POWER_ON : *pwrst;
  108. ret = pwrdm_set_next_pwrst(pwrdm, next_pwrst);
  109. out:
  110. if (!request)
  111. omap_iommu_dra7_emu_swsup_config(pdev, false);
  112. return ret;
  113. }