cpu_ops_sbi.c 2.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * HSM extension and cpu_ops implementation.
  4. *
  5. * Copyright (c) 2020 Western Digital Corporation or its affiliates.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/mm.h>
  9. #include <linux/sched/task_stack.h>
  10. #include <asm/cpu_ops.h>
  11. #include <asm/cpu_ops_sbi.h>
  12. #include <asm/sbi.h>
  13. #include <asm/smp.h>
/* Secondary-hart entry point — assumed defined in the arch's assembly startup code (TODO confirm: head.S). */
extern char secondary_start_sbi[];

/* Forward declaration so the helpers below can reference the ops table defined at the bottom of this file. */
const struct cpu_operations cpu_ops_sbi;

/*
 * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
 * be invoked from multiple threads in parallel. Define a per cpu data
 * to handle that.
 */
static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
  22. static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
  23. unsigned long priv)
  24. {
  25. struct sbiret ret;
  26. ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
  27. hartid, saddr, priv, 0, 0, 0);
  28. if (ret.error)
  29. return sbi_err_map_linux_errno(ret.error);
  30. else
  31. return 0;
  32. }
  33. #ifdef CONFIG_HOTPLUG_CPU
  34. static int sbi_hsm_hart_stop(void)
  35. {
  36. struct sbiret ret;
  37. ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
  38. if (ret.error)
  39. return sbi_err_map_linux_errno(ret.error);
  40. else
  41. return 0;
  42. }
  43. static int sbi_hsm_hart_get_status(unsigned long hartid)
  44. {
  45. struct sbiret ret;
  46. ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
  47. hartid, 0, 0, 0, 0, 0);
  48. if (ret.error)
  49. return sbi_err_map_linux_errno(ret.error);
  50. else
  51. return ret.value;
  52. }
  53. #endif
/*
 * Bring up secondary CPU @cpuid: publish its idle task and stack in the
 * per-cpu boot_data, then issue an HSM HART_START aimed at
 * secondary_start_sbi with the physical address of that boot_data as the
 * private argument.
 *
 * Returns 0 on success or a negative errno from sbi_hsm_hart_start().
 *
 * NOTE: the two smp_mb() barriers order the writes so the started hart
 * observes a fully populated boot_data — do not reorder these statements.
 */
static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
	unsigned long hartid = cpuid_to_hartid_map(cpuid);
	unsigned long hsm_data;
	struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);

	/* Make sure tidle is updated */
	smp_mb();
	bdata->task_ptr = tidle;
	/* Stack grows down from the top of the idle task's stack page(s). */
	bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
	/* Make sure boot data is updated */
	smp_mb();
	hsm_data = __pa(bdata);
	return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
}
  69. static int sbi_cpu_prepare(unsigned int cpuid)
  70. {
  71. if (!cpu_ops_sbi.cpu_start) {
  72. pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
  73. return -ENODEV;
  74. }
  75. return 0;
  76. }
  77. #ifdef CONFIG_HOTPLUG_CPU
  78. static int sbi_cpu_disable(unsigned int cpuid)
  79. {
  80. if (!cpu_ops_sbi.cpu_stop)
  81. return -EOPNOTSUPP;
  82. return 0;
  83. }
  84. static void sbi_cpu_stop(void)
  85. {
  86. int ret;
  87. ret = sbi_hsm_hart_stop();
  88. pr_crit("Unable to stop the cpu %u (%d)\n", smp_processor_id(), ret);
  89. }
  90. static int sbi_cpu_is_stopped(unsigned int cpuid)
  91. {
  92. int rc;
  93. unsigned long hartid = cpuid_to_hartid_map(cpuid);
  94. rc = sbi_hsm_hart_get_status(hartid);
  95. if (rc == SBI_HSM_STATE_STOPPED)
  96. return 0;
  97. return rc;
  98. }
  99. #endif
/*
 * cpu_operations implementation backed by the SBI HSM extension.
 * The hotplug callbacks are compiled in only under CONFIG_HOTPLUG_CPU.
 */
const struct cpu_operations cpu_ops_sbi = {
	.name = "sbi",
	.cpu_prepare = sbi_cpu_prepare,
	.cpu_start = sbi_cpu_start,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = sbi_cpu_disable,
	.cpu_stop = sbi_cpu_stop,
	.cpu_is_stopped = sbi_cpu_is_stopped,
#endif
};