mips-cpc.c 2.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2013 Imagination Technologies
  4. * Author: Paul Burton <[email protected]>
  5. */
  6. #include <linux/bitfield.h>
  7. #include <linux/errno.h>
  8. #include <linux/percpu.h>
  9. #include <linux/of.h>
  10. #include <linux/of_address.h>
  11. #include <linux/spinlock.h>
  12. #include <asm/mips-cps.h>
  13. void __iomem *mips_cpc_base;
  14. static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
  15. static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
  16. phys_addr_t __weak mips_cpc_default_phys_base(void)
  17. {
  18. struct device_node *cpc_node;
  19. struct resource res;
  20. int err;
  21. cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
  22. if (cpc_node) {
  23. err = of_address_to_resource(cpc_node, 0, &res);
  24. of_node_put(cpc_node);
  25. if (!err)
  26. return res.start;
  27. }
  28. return 0;
  29. }
  30. /**
  31. * mips_cpc_phys_base - retrieve the physical base address of the CPC
  32. *
  33. * This function returns the physical base address of the Cluster Power
  34. * Controller memory mapped registers, or 0 if no Cluster Power Controller
  35. * is present.
  36. */
  37. static phys_addr_t mips_cpc_phys_base(void)
  38. {
  39. unsigned long cpc_base;
  40. if (!mips_cm_present())
  41. return 0;
  42. if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
  43. return 0;
  44. /* If the CPC is already enabled, leave it so */
  45. cpc_base = read_gcr_cpc_base();
  46. if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
  47. return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
  48. /* Otherwise, use the default address */
  49. cpc_base = mips_cpc_default_phys_base();
  50. if (!cpc_base)
  51. return cpc_base;
  52. /* Enable the CPC, mapped at the default address */
  53. write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
  54. return cpc_base;
  55. }
  56. int mips_cpc_probe(void)
  57. {
  58. phys_addr_t addr;
  59. unsigned int cpu;
  60. for_each_possible_cpu(cpu)
  61. spin_lock_init(&per_cpu(cpc_core_lock, cpu));
  62. addr = mips_cpc_phys_base();
  63. if (!addr)
  64. return -ENODEV;
  65. mips_cpc_base = ioremap(addr, 0x8000);
  66. if (!mips_cpc_base)
  67. return -ENXIO;
  68. return 0;
  69. }
/**
 * mips_cpc_lock_other() - lock access to another core's CPC registers
 * @core: the core whose registers the core-other region should target
 *
 * Point the CPC core-other region at @core and take this core's lock so
 * that other users on the same core cannot concurrently redirect it.
 * Must be balanced by a call to mips_cpc_unlock_other().
 */
void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	/* The lock & its saved flags are per-core: stay on this core */
	preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(FIELD_PREP(CPC_Cx_OTHER_CORENUM, core));

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
/**
 * mips_cpc_unlock_other() - unlock access to another core's CPC registers
 *
 * Release the per-core lock taken by mips_cpc_lock_other(), restore the
 * saved IRQ flags and re-enable preemption.
 */
void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	curr_core = cpu_core(&current_cpu_data);
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
  97. }