/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <[email protected]>
 */

#ifndef __MIPS_ASM_MIPS_CPS_H__
# error Please include asm/mips-cps.h rather than asm/mips-cm.h
#endif

#ifndef __MIPS_ASM_MIPS_CM_H__
#define __MIPS_ASM_MIPS_CM_H__

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* The base address of the CM GCR block */
extern void __iomem *mips_gcr_base;

/* The base address of the CM L2-only sync region */
extern void __iomem *mips_cm_l2sync_base;

/**
 * __mips_cm_phys_base - retrieve the physical base address of the CM
 *
 * This function returns the physical base address of the Coherence Manager
 * global control block, or 0 if no Coherence Manager is present. It provides
 * a default implementation which reads the CMGCRBase register where available,
 * and may be overridden by platforms which determine this address in a
 * different way by defining a function with the same prototype except for the
 * name mips_cm_phys_base (without underscores).
 */
extern phys_addr_t __mips_cm_phys_base(void);

/*
 * mips_cm_is64 - determine CM register width
 *
 * The CM register width is determined by the version of the CM, with CM3
 * introducing 64 bit GCRs and all prior CM versions having 32 bit GCRs.
 * However we may run a kernel built for MIPS32 on a system with 64 bit GCRs,
 * or vice-versa. This variable indicates the width of the memory accesses
 * that the kernel will perform to GCRs, which may differ from the actual
 * width of the GCRs.
 *
 * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
 */
extern int mips_cm_is64;
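
/*
 * Illustrative sketch only: the real GCR accessors are generated by the
 * CPS_ACCESSOR_* macros in asm/mips-cps.h, but a 64-bit GCR read that
 * honours mips_cm_is64 is expected to look roughly like this (the helper
 * name is hypothetical):
 *
 *	static inline u64 example_read_gcr64(unsigned long off)
 *	{
 *		if (mips_cm_is64)
 *			return readq(mips_gcr_base + off);
 *
 *		return readl(mips_gcr_base + off);
 *	}
 */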

/**
 * mips_cm_error_report - Report CM cache errors
 */
#ifdef CONFIG_MIPS_CM
extern void mips_cm_error_report(void);
#else
static inline void mips_cm_error_report(void) {}
#endif

/**
 * mips_cm_probe - probe for a Coherence Manager
 *
 * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
 * is successfully detected, else -errno.
 */
#ifdef CONFIG_MIPS_CM
extern int mips_cm_probe(void);
#else
static inline int mips_cm_probe(void)
{
	return -ENODEV;
}
#endif

/**
 * mips_cm_present - determine whether a Coherence Manager is present
 *
 * Returns true if a CM is present in the system, else false.
 */
static inline bool mips_cm_present(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_gcr_base != NULL;
#else
	return false;
#endif
}

/**
 * mips_cm_has_l2sync - determine whether an L2-only sync region is present
 *
 * Returns true if the system implements an L2-only sync region, else false.
 */
static inline bool mips_cm_has_l2sync(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_cm_l2sync_base != NULL;
#else
	return false;
#endif
}

/* Offsets to register blocks from the CM base address */
#define MIPS_CM_GCB_OFS		0x0000	/* Global Control Block */
#define MIPS_CM_CLCB_OFS	0x2000	/* Core Local Control Block */
#define MIPS_CM_COCB_OFS	0x4000	/* Core Other Control Block */
#define MIPS_CM_GDB_OFS		0x6000	/* Global Debug Block */

/* Total size of the CM memory mapped registers */
#define MIPS_CM_GCR_SIZE	0x8000

/* Size of the L2-only sync region */
#define MIPS_CM_L2SYNC_SIZE	0x1000

#define GCR_ACCESSOR_RO(sz, off, name)					\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name)		\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_ACCESSOR_RW(sz, off, name)					\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name)		\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_CX_ACCESSOR_RO(sz, off, name)				\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name)	\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)

#define GCR_CX_ACCESSOR_RW(sz, off, name)				\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name)	\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)
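
/*
 * Note: the CPS_ACCESSOR_RO/RW macros are provided by asm/mips-cps.h and,
 * as the helpers used later in this header suggest, generate accessors
 * named read_gcr_<name>()/write_gcr_<name>() (plus redir_/cl_/co_ variants
 * for the redirect, core-local & core-other blocks). As an illustrative,
 * non-authoritative example, the GCR_ACCESS register declared below would
 * typically be used as:
 *
 *	u32 access = read_gcr_access();
 *
 *	write_gcr_access(access | CM_GCR_ACCESS_ACCESSEN);
 */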

/* GCR_CONFIG - Information about the system */
GCR_ACCESSOR_RO(64, 0x000, config)
#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE	BIT_ULL(43)
#define CM_GCR_CONFIG_CLUSTER_ID		GENMASK_ULL(39, 32)
#define CM_GCR_CONFIG_NUM_CLUSTERS		GENMASK(29, 23)
#define CM_GCR_CONFIG_NUMIOCU			GENMASK(15, 8)
#define CM_GCR_CONFIG_PCORES			GENMASK(7, 0)

/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */
GCR_ACCESSOR_RW(64, 0x008, base)
#define CM_GCR_BASE_GCRBASE			GENMASK_ULL(47, 15)
#define CM_GCR_BASE_CMDEFTGT			GENMASK(1, 0)
#define  CM_GCR_BASE_CMDEFTGT_MEM		0
#define  CM_GCR_BASE_CMDEFTGT_RESERVED		1
#define  CM_GCR_BASE_CMDEFTGT_IOCU0		2
#define  CM_GCR_BASE_CMDEFTGT_IOCU1		3

/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x020, access)
#define CM_GCR_ACCESS_ACCESSEN			GENMASK(7, 0)

/* GCR_REV - Indicates the Coherence Manager revision */
GCR_ACCESSOR_RO(32, 0x030, rev)
#define CM_GCR_REV_MAJOR			GENMASK(15, 8)
#define CM_GCR_REV_MINOR			GENMASK(7, 0)

#define CM_ENCODE_REV(major, minor)				\
		(FIELD_PREP(CM_GCR_REV_MAJOR, major) |		\
		 FIELD_PREP(CM_GCR_REV_MINOR, minor))

#define CM_REV_CM2				CM_ENCODE_REV(6, 0)
#define CM_REV_CM2_5				CM_ENCODE_REV(7, 0)
#define CM_REV_CM3				CM_ENCODE_REV(8, 0)
#define CM_REV_CM3_5				CM_ENCODE_REV(9, 0)
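
/*
 * The encoded revisions above are intended for direct comparison against
 * the value returned by mips_cm_revision(), defined later in this header,
 * for example (the helper called here is hypothetical):
 *
 *	if (mips_cm_revision() >= CM_REV_CM3)
 *		setup_cm3_features();
 */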

/* GCR_ERR_CONTROL - Control error checking logic */
GCR_ACCESSOR_RW(32, 0x038, err_control)
#define CM_GCR_ERR_CONTROL_L2_ECC_EN		BIT(1)
#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT	BIT(0)

/* GCR_ERR_MASK - Control which errors are reported as interrupts */
GCR_ACCESSOR_RW(64, 0x040, error_mask)

/* GCR_ERR_CAUSE - Indicates the type of error that occurred */
GCR_ACCESSOR_RW(64, 0x048, error_cause)
#define CM_GCR_ERROR_CAUSE_ERRTYPE		GENMASK(31, 27)
#define CM3_GCR_ERROR_CAUSE_ERRTYPE		GENMASK_ULL(63, 58)
#define CM_GCR_ERROR_CAUSE_ERRINFO		GENMASK(26, 0)

/* GCR_ERR_ADDR - Indicates the address associated with an error */
GCR_ACCESSOR_RW(64, 0x050, error_addr)

/* GCR_ERR_MULT - Indicates when multiple errors have occurred */
GCR_ACCESSOR_RW(64, 0x058, error_mult)
#define CM_GCR_ERROR_MULT_ERR2ND		GENMASK(4, 0)

/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */
GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE	GENMASK(31, 12)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN		BIT(0)

/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RW(64, 0x080, gic_base)
#define CM_GCR_GIC_BASE_GICBASE			GENMASK(31, 17)
#define CM_GCR_GIC_BASE_GICEN			BIT(0)

/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */
GCR_ACCESSOR_RW(64, 0x088, cpc_base)
#define CM_GCR_CPC_BASE_CPCBASE			GENMASK(31, 15)
#define CM_GCR_CPC_BASE_CPCEN			BIT(0)

/* GCR_REGn_BASE - Base addresses of CM address regions */
GCR_ACCESSOR_RW(64, 0x090, reg0_base)
GCR_ACCESSOR_RW(64, 0x0a0, reg1_base)
GCR_ACCESSOR_RW(64, 0x0b0, reg2_base)
GCR_ACCESSOR_RW(64, 0x0c0, reg3_base)
#define CM_GCR_REGn_BASE_BASEADDR		GENMASK(31, 16)

/* GCR_REGn_MASK - Size & destination of CM address regions */
GCR_ACCESSOR_RW(64, 0x098, reg0_mask)
GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask)
GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask)
GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask)
#define CM_GCR_REGn_MASK_ADDRMASK		GENMASK(31, 16)
#define CM_GCR_REGn_MASK_CCAOVR			GENMASK(7, 5)
#define CM_GCR_REGn_MASK_CCAOVREN		BIT(4)
#define CM_GCR_REGn_MASK_DROPL2			BIT(2)
#define CM_GCR_REGn_MASK_CMTGT			GENMASK(1, 0)
#define  CM_GCR_REGn_MASK_CMTGT_DISABLED	0x0
#define  CM_GCR_REGn_MASK_CMTGT_MEM		0x1
#define  CM_GCR_REGn_MASK_CMTGT_IOCU0		0x2
#define  CM_GCR_REGn_MASK_CMTGT_IOCU1		0x3
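
/*
 * Illustrative sketch only: routing an address window to IOCU0 through CM
 * address region 0 is expected to look roughly like the following, where
 * win_base & win_mask are hypothetical values holding the window base and
 * size mask in bits [31:16]:
 *
 *	write_gcr_reg0_base(win_base & CM_GCR_REGn_BASE_BASEADDR);
 *	write_gcr_reg0_mask((win_mask & CM_GCR_REGn_MASK_ADDRMASK) |
 *			    CM_GCR_REGn_MASK_CMTGT_IOCU0);
 */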

/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
#define CM_GCR_GIC_STATUS_EX			BIT(0)

/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX			BIT(0)

/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS			BIT(20)
#define CM_GCR_L2_CONFIG_SET_SIZE		GENMASK(15, 12)
#define CM_GCR_L2_CONFIG_LINE_SIZE		GENMASK(11, 8)
#define CM_GCR_L2_CONFIG_ASSOC			GENMASK(7, 0)

/* GCR_SYS_CONFIG2 - Further information about the system */
GCR_ACCESSOR_RO(32, 0x150, sys_config2)
#define CM_GCR_SYS_CONFIG2_MAXVPW		GENMASK(3, 0)

/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
#define CM_GCR_L2_PFT_CONTROL_PAGEMASK		GENMASK(31, 12)
#define CM_GCR_L2_PFT_CONTROL_PFTEN		BIT(8)
#define CM_GCR_L2_PFT_CONTROL_NPFT		GENMASK(7, 0)

/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
#define CM_GCR_L2_PFT_CONTROL_B_CEN		BIT(8)
#define CM_GCR_L2_PFT_CONTROL_B_PORTID		GENMASK(7, 0)

/* GCR_L2SM_COP - L2 cache op state machine control */
GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
#define CM_GCR_L2SM_COP_PRESENT			BIT(31)
#define CM_GCR_L2SM_COP_RESULT			GENMASK(8, 6)
#define  CM_GCR_L2SM_COP_RESULT_DONTCARE	0
#define  CM_GCR_L2SM_COP_RESULT_DONE_OK		1
#define  CM_GCR_L2SM_COP_RESULT_DONE_ERROR	2
#define  CM_GCR_L2SM_COP_RESULT_ABORT_OK	3
#define  CM_GCR_L2SM_COP_RESULT_ABORT_ERROR	4
#define CM_GCR_L2SM_COP_RUNNING			BIT(5)
#define CM_GCR_L2SM_COP_TYPE			GENMASK(4, 2)
#define  CM_GCR_L2SM_COP_TYPE_IDX_WBINV		0
#define  CM_GCR_L2SM_COP_TYPE_IDX_STORETAG	1
#define  CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA	2
#define  CM_GCR_L2SM_COP_TYPE_HIT_INV		4
#define  CM_GCR_L2SM_COP_TYPE_HIT_WBINV		5
#define  CM_GCR_L2SM_COP_TYPE_HIT_WB		6
#define  CM_GCR_L2SM_COP_TYPE_FETCHLOCK		7
#define CM_GCR_L2SM_COP_CMD			GENMASK(1, 0)
#define  CM_GCR_L2SM_COP_CMD_START		1	/* only when idle */
#define  CM_GCR_L2SM_COP_CMD_ABORT		3	/* only when running */
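
/*
 * Illustrative sketch only: after checking CM_GCR_L2SM_COP_PRESENT and
 * programming GCR_L2SM_TAG_ADDR_COP (declared below), an index
 * writeback-invalidate could be kicked off and awaited roughly as follows:
 *
 *	write_gcr_l2sm_cop(FIELD_PREP(CM_GCR_L2SM_COP_TYPE,
 *				      CM_GCR_L2SM_COP_TYPE_IDX_WBINV) |
 *			   FIELD_PREP(CM_GCR_L2SM_COP_CMD,
 *				      CM_GCR_L2SM_COP_CMD_START));
 *
 *	while (read_gcr_l2sm_cop() & CM_GCR_L2SM_COP_RUNNING)
 *		cpu_relax();
 */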

/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */
GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop)
#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES	GENMASK_ULL(63, 48)
#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG	GENMASK_ULL(47, 6)

/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */
GCR_ACCESSOR_RW(64, 0x680, bev_base)

/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */
GCR_CX_ACCESSOR_RW(32, 0x000, reset_release)

/* GCR_Cx_COHERENCE - Controls core coherence */
GCR_CX_ACCESSOR_RW(32, 0x008, coherence)
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN		GENMASK(7, 0)
#define CM3_GCR_Cx_COHERENCE_COHEN		BIT(0)

/* GCR_Cx_CONFIG - Information about a core's configuration */
GCR_CX_ACCESSOR_RO(32, 0x010, config)
#define CM_GCR_Cx_CONFIG_IOCUTYPE		GENMASK(11, 10)
#define CM_GCR_Cx_CONFIG_PVPE			GENMASK(9, 0)

/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */
GCR_CX_ACCESSOR_RW(32, 0x018, other)
#define CM_GCR_Cx_OTHER_CORENUM			GENMASK(31, 16)	/* CM < 3 */
#define CM_GCR_Cx_OTHER_CLUSTER_EN		BIT(31)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_GIC_EN			BIT(30)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_BLOCK			GENMASK(25, 24)	/* CM >= 3.5 */
#define  CM_GCR_Cx_OTHER_BLOCK_LOCAL		0
#define  CM_GCR_Cx_OTHER_BLOCK_GLOBAL		1
#define  CM_GCR_Cx_OTHER_BLOCK_USER		2
#define  CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH	3
#define CM_GCR_Cx_OTHER_CLUSTER			GENMASK(21, 16)	/* CM >= 3.5 */
#define CM3_GCR_Cx_OTHER_CORE			GENMASK(13, 8)	/* CM >= 3 */
#define  CM_GCR_Cx_OTHER_CORE_CM		32
#define CM3_GCR_Cx_OTHER_VP			GENMASK(2, 0)	/* CM >= 3 */

/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE		GENMASK(31, 12)

/* GCR_Cx_ID - Identify the current core */
GCR_CX_ACCESSOR_RO(32, 0x028, id)
#define CM_GCR_Cx_ID_CLUSTER			GENMASK(15, 8)
#define CM_GCR_Cx_ID_CORE			GENMASK(7, 0)

/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */
GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base)
#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET	BIT(31)
#define CM_GCR_Cx_RESET_EXT_BASE_UEB		BIT(30)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK	GENMASK(27, 20)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA	GENMASK(7, 1)
#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT	BIT(0)

/**
 * mips_cm_l2sync - perform an L2-only sync operation
 *
 * If an L2-only sync region is present in the system then this function
 * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
 */
static inline int mips_cm_l2sync(void)
{
	if (!mips_cm_has_l2sync())
		return -ENODEV;

	writel(0, mips_cm_l2sync_base);
	return 0;
}

/**
 * mips_cm_revision() - return CM revision
 *
 * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
 * return value should be checked against the CM_REV_* macros.
 */
static inline int mips_cm_revision(void)
{
	if (!mips_cm_present())
		return 0;

	return read_gcr_rev();
}

/**
 * mips_cm_max_vp_width() - return the width in bits of VP indices
 *
 * Return: the width, in bits, of VP indices in fields that combine core & VP
 * indices.
 */
static inline unsigned int mips_cm_max_vp_width(void)
{
	extern int smp_num_siblings;

	if (mips_cm_revision() >= CM_REV_CM3)
		return FIELD_GET(CM_GCR_SYS_CONFIG2_MAXVPW,
				 read_gcr_sys_config2());

	if (mips_cm_present()) {
		/*
		 * We presume that all cores in the system will have the same
		 * number of VP(E)s, and if that ever changes then this will
		 * need revisiting.
		 */
		return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_cl_config()) + 1;
	}

	if (IS_ENABLED(CONFIG_SMP))
		return smp_num_siblings;

	return 1;
}

/**
 * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
 * @cpu: the CPU whose VP ID to calculate
 *
 * Hardware such as the GIC uses identifiers for VPs which may not match the
 * CPU numbers used by Linux. This function calculates the hardware VP
 * identifier corresponding to a given CPU.
 *
 * Return: the VP ID for the CPU.
 */
static inline unsigned int mips_cm_vp_id(unsigned int cpu)
{
	unsigned int core = cpu_core(&cpu_data[cpu]);
	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);

	return (core * mips_cm_max_vp_width()) + vp;
}
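
/*
 * Worked example: with mips_cm_max_vp_width() == 2, the CPU running on
 * core 1, VP 0 gets hardware VP ID (1 * 2) + 0 = 2, while core 0, VP 1
 * gets VP ID (0 * 2) + 1 = 1.
 */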

#ifdef CONFIG_MIPS_CM

/**
 * mips_cm_lock_other - lock access to redirect/other region
 * @cluster: the other cluster to be accessed
 * @core: the other core to be accessed
 * @vp: the VP within the other core to be accessed
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cluster, @core, @vp & register
 * @block. Must be called before using the redirect/other region, and followed
 * by a call to mips_cm_unlock_other() when access to the redirect/other region
 * is complete.
 *
 * This function acquires a spinlock such that code between it &
 * mips_cm_unlock_other() calls cannot be pre-empted by anything which may
 * reconfigure the redirect/other region, and cannot be interfered with by
 * another VP in the core. As such calls to this function should not be nested.
 */
extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			       unsigned int vp, unsigned int block);

/**
 * mips_cm_unlock_other - unlock access to redirect/other region
 *
 * Must be called after mips_cm_lock_other() once all required access to the
 * redirect/other region has been completed.
 */
extern void mips_cm_unlock_other(void);

#else /* !CONFIG_MIPS_CM */

static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
				      unsigned int vp, unsigned int block) { }
static inline void mips_cm_unlock_other(void) { }

#endif /* !CONFIG_MIPS_CM */

/**
 * mips_cm_lock_other_cpu - lock access to redirect/other region
 * @cpu: the other CPU whose register we want to access
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cpu & register @block. This is
 * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
 * for convenience.
 */
static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
{
	struct cpuinfo_mips *d = &cpu_data[cpu];

	mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
}
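
/*
 * Illustrative, non-authoritative example: accesses to the redirect/other
 * region are bracketed by the lock/unlock pair above, e.g. reading another
 * CPU's GCR_Cx_CONFIG via the co_ accessors (surrounding code hypothetical):
 *
 *	mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 *	cfg = read_gcr_co_config();
 *	mips_cm_unlock_other();
 */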

#endif /* __MIPS_ASM_MIPS_CM_H__ */