nv.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157
  1. /*
  2. * Copyright 2019 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <linux/firmware.h>
  24. #include <linux/slab.h>
  25. #include <linux/module.h>
  26. #include <linux/pci.h>
  27. #include <drm/amdgpu_drm.h>
  28. #include "amdgpu.h"
  29. #include "amdgpu_atombios.h"
  30. #include "amdgpu_ih.h"
  31. #include "amdgpu_uvd.h"
  32. #include "amdgpu_vce.h"
  33. #include "amdgpu_ucode.h"
  34. #include "amdgpu_psp.h"
  35. #include "atom.h"
  36. #include "amd_pcie.h"
  37. #include "gc/gc_10_1_0_offset.h"
  38. #include "gc/gc_10_1_0_sh_mask.h"
  39. #include "mp/mp_11_0_offset.h"
  40. #include "soc15.h"
  41. #include "soc15_common.h"
  42. #include "gmc_v10_0.h"
  43. #include "gfxhub_v2_0.h"
  44. #include "mmhub_v2_0.h"
  45. #include "nbio_v2_3.h"
  46. #include "nbio_v7_2.h"
  47. #include "hdp_v5_0.h"
  48. #include "nv.h"
  49. #include "navi10_ih.h"
  50. #include "gfx_v10_0.h"
  51. #include "sdma_v5_0.h"
  52. #include "sdma_v5_2.h"
  53. #include "vcn_v2_0.h"
  54. #include "jpeg_v2_0.h"
  55. #include "vcn_v3_0.h"
  56. #include "jpeg_v3_0.h"
  57. #include "amdgpu_vkms.h"
  58. #include "mes_v10_1.h"
  59. #include "mxgpu_nv.h"
  60. #include "smuio_v11_0.h"
  61. #include "smuio_v11_0_6.h"
  62. static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi: encode caps shared by all Navi-family parts (H.264 + HEVC up to 4Kx2304) */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};

/* Navi1x: decode caps (no AV1 on VCN 2.x) */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};
/* Sienna Cichlid: Navi1x decode caps plus AV1 (VCN 3.0) */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};
/*
 * SRIOV Sienna Cichlid, not const since data is controlled by host
 * (the hypervisor may trim these tables at init time).
 */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
	.codec_array = sriov_sc_video_codecs_encode_array,
};

static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
	.codec_array = sriov_sc_video_codecs_decode_array,
};
/* Beige Goby: decode only (H.264/HEVC/VP9), no encode engine */
static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs bg_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(bg_video_codecs_decode_array),
	.codec_array = bg_video_codecs_decode_array,
};

/* empty table: Beige Goby exposes no encode capability */
static const struct amdgpu_video_codecs bg_video_codecs_encode = {
	.codec_count = 0,
	.codec_array = NULL,
};
/* Yellow Carp: decode caps including AV1 (VCN 3.1), no legacy MPEG2/4/VC1 */
static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs yc_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(yc_video_codecs_decode_array),
	.codec_array = yc_video_codecs_decode_array,
};
  160. static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
  161. const struct amdgpu_video_codecs **codecs)
  162. {
  163. switch (adev->ip_versions[UVD_HWIP][0]) {
  164. case IP_VERSION(3, 0, 0):
  165. case IP_VERSION(3, 0, 64):
  166. case IP_VERSION(3, 0, 192):
  167. if (amdgpu_sriov_vf(adev)) {
  168. if (encode)
  169. *codecs = &sriov_sc_video_codecs_encode;
  170. else
  171. *codecs = &sriov_sc_video_codecs_decode;
  172. } else {
  173. if (encode)
  174. *codecs = &nv_video_codecs_encode;
  175. else
  176. *codecs = &sc_video_codecs_decode;
  177. }
  178. return 0;
  179. case IP_VERSION(3, 0, 16):
  180. case IP_VERSION(3, 0, 2):
  181. if (encode)
  182. *codecs = &nv_video_codecs_encode;
  183. else
  184. *codecs = &sc_video_codecs_decode;
  185. return 0;
  186. case IP_VERSION(3, 1, 1):
  187. case IP_VERSION(3, 1, 2):
  188. if (encode)
  189. *codecs = &nv_video_codecs_encode;
  190. else
  191. *codecs = &yc_video_codecs_decode;
  192. return 0;
  193. case IP_VERSION(3, 0, 33):
  194. if (encode)
  195. *codecs = &bg_video_codecs_encode;
  196. else
  197. *codecs = &bg_video_codecs_decode;
  198. return 0;
  199. case IP_VERSION(2, 0, 0):
  200. case IP_VERSION(2, 0, 2):
  201. if (encode)
  202. *codecs = &nv_video_codecs_encode;
  203. else
  204. *codecs = &nv_video_codecs_decode;
  205. return 0;
  206. default:
  207. return -EINVAL;
  208. }
  209. }
  210. /*
  211. * Indirect registers accessor
  212. */
  213. static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
  214. {
  215. unsigned long address, data;
  216. address = adev->nbio.funcs->get_pcie_index_offset(adev);
  217. data = adev->nbio.funcs->get_pcie_data_offset(adev);
  218. return amdgpu_device_indirect_rreg(adev, address, data, reg);
  219. }
  220. static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  221. {
  222. unsigned long address, data;
  223. address = adev->nbio.funcs->get_pcie_index_offset(adev);
  224. data = adev->nbio.funcs->get_pcie_data_offset(adev);
  225. amdgpu_device_indirect_wreg(adev, address, data, reg, v);
  226. }
  227. static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
  228. {
  229. unsigned long address, data;
  230. address = adev->nbio.funcs->get_pcie_index_offset(adev);
  231. data = adev->nbio.funcs->get_pcie_data_offset(adev);
  232. return amdgpu_device_indirect_rreg64(adev, address, data, reg);
  233. }
  234. static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
  235. {
  236. unsigned long address, data;
  237. address = adev->nbio.funcs->get_pcie_index_offset(adev);
  238. data = adev->nbio.funcs->get_pcie_data_offset(adev);
  239. amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
  240. }
  241. static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
  242. {
  243. unsigned long flags, address, data;
  244. u32 r;
  245. address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
  246. data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
  247. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  248. WREG32(address, (reg));
  249. r = RREG32(data);
  250. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  251. return r;
  252. }
  253. static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  254. {
  255. unsigned long flags, address, data;
  256. address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
  257. data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
  258. spin_lock_irqsave(&adev->didt_idx_lock, flags);
  259. WREG32(address, (reg));
  260. WREG32(data, (v));
  261. spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
  262. }
/* Return the VRAM size as reported by the NBIO block (in MB per NBIO contract). */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
/* Return the reference (XCLK) frequency from the cached SPLL clock info. */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
  271. void nv_grbm_select(struct amdgpu_device *adev,
  272. u32 me, u32 pipe, u32 queue, u32 vmid)
  273. {
  274. u32 grbm_gfx_cntl = 0;
  275. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
  276. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
  277. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
  278. grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
  279. WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
  280. }
/* Stub: VGA state switching is not implemented for NV ASICs. */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
/* Stub: reading the BIOS while the ASIC is disabled is not implemented. */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
/*
 * Whitelist of registers userspace may read through the read_register
 * ioctl path (nv_read_register()); anything else returns -EINVAL.
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
/*
 * Read a GRBM-indexed register for a specific SE/SH. Takes grbm_idx_mutex,
 * temporarily narrows the GRBM selection when a specific se/sh is requested
 * (0xffffffff means "broadcast/all"), and always restores broadcast mode
 * before releasing the lock.
 */
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}
  324. static uint32_t nv_get_register_value(struct amdgpu_device *adev,
  325. bool indexed, u32 se_num,
  326. u32 sh_num, u32 reg_offset)
  327. {
  328. if (indexed) {
  329. return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
  330. } else {
  331. if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
  332. return adev->gfx.config.gb_addr_config;
  333. return RREG32(reg_offset);
  334. }
  335. }
  336. static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
  337. u32 sh_num, u32 reg_offset, u32 *value)
  338. {
  339. uint32_t i;
  340. struct soc15_allowed_register_entry *en;
  341. *value = 0;
  342. for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
  343. en = &nv_allowed_read_registers[i];
  344. if (!adev->reg_offset[en->hwip][en->inst])
  345. continue;
  346. else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
  347. + en->reg_offset))
  348. continue;
  349. *value = nv_get_register_value(adev,
  350. nv_allowed_read_registers[i].grbm_indexed,
  351. se_num, sh_num, reg_offset);
  352. return 0;
  353. }
  354. return -EINVAL;
  355. }
/*
 * nv_asic_mode2_reset - perform a mode2 (SMU-assisted) ASIC reset
 *
 * Marks the engine as hung in the ATOM scratch registers, disables bus
 * mastering, caches PCI config space, asks DPM to do the mode2 reset,
 * restores config space and then polls memsize until the ASIC leaves
 * reset (memsize reads 0xffffffff while in reset).
 * Returns the result of amdgpu_dpm_mode2_reset().
 */
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
/*
 * nv_asic_reset_method - pick the reset method for this ASIC
 *
 * A user-forced method (module parameter) wins if it is one of the
 * supported ones; any other explicit value is warned about and AUTO is
 * used. Otherwise the method is chosen per MP1 IP version: APU-class
 * parts use MODE2, Sienna Cichlid family uses MODE1, and everything
 * else prefers BACO when DPM reports it as supported.
 */
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
		return amdgpu_reset_method;

	/* -1 means AUTO; any other unsupported value falls back with a warning */
	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
		return AMD_RESET_METHOD_MODE2;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		return AMD_RESET_METHOD_MODE1;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}
  408. static int nv_asic_reset(struct amdgpu_device *adev)
  409. {
  410. int ret = 0;
  411. switch (nv_asic_reset_method(adev)) {
  412. case AMD_RESET_METHOD_PCI:
  413. dev_info(adev->dev, "PCI reset\n");
  414. ret = amdgpu_device_pci_reset(adev);
  415. break;
  416. case AMD_RESET_METHOD_BACO:
  417. dev_info(adev->dev, "BACO reset\n");
  418. ret = amdgpu_dpm_baco_reset(adev);
  419. break;
  420. case AMD_RESET_METHOD_MODE2:
  421. dev_info(adev->dev, "MODE2 reset\n");
  422. ret = nv_asic_mode2_reset(adev);
  423. break;
  424. default:
  425. dev_info(adev->dev, "MODE1 reset\n");
  426. ret = amdgpu_device_mode1_reset(adev);
  427. break;
  428. }
  429. return ret;
  430. }
/* Stub: UVD/VCN clock programming not implemented; always succeeds. */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
/* Stub: VCE clock programming not implemented; always succeeds. */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
/*
 * nv_pcie_gen3_enable - (placeholder) bump the PCIe link to gen2/3
 *
 * Bails out on root-bus devices, when the user disabled it via the
 * amdgpu_pcie_gen2 module parameter, or when the platform does not
 * advertise gen2/gen3 support. The actual link training is still todo.
 */
static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
  452. static void nv_program_aspm(struct amdgpu_device *adev)
  453. {
  454. if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
  455. return;
  456. if (!(adev->flags & AMD_IS_APU) &&
  457. (adev->nbio.funcs->program_aspm))
  458. adev->nbio.funcs->program_aspm(adev);
  459. }
/* Enable/disable both the doorbell aperture and its self-ring aperture via NBIO. */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
/* IP block descriptor for the NV "common" block, registered during IP discovery. */
const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
/* Install the NV SRIOV virtualization ops (called for VF devices). */
void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
/* Return the silicon revision id as reported by the NBIO block. */
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
/* NV ASICs always require a full reset (no soft/partial reset path). */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
  486. static bool nv_need_reset_on_init(struct amdgpu_device *adev)
  487. {
  488. u32 sol_reg;
  489. if (adev->flags & AMD_IS_APU)
  490. return false;
  491. /* Check sOS sign of life register to confirm sys driver and sOS
  492. * are already been loaded.
  493. */
  494. sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
  495. if (sol_reg)
  496. return true;
  497. return false;
  498. }
/*
 * Dummy implementation backing the pcie_replay_count sysfs attribute;
 * NV does not expose a replay counter yet, so always report 0.
 */
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO
	 * dummy implement for pcie_replay_count sysfs interface
	 * */
	return 0;
}
/*
 * nv_init_doorbell_index - populate the Navi10 doorbell index layout
 *
 * Assigns the fixed doorbell slot for every engine (KIQ, MEC rings,
 * user queues, GFX rings, MES, SDMA, IH, VCN) from the AMDGPU_NAVI10_*
 * layout constants. max_assignment is shifted left by one because VCN
 * doorbells are 64-bit (two 32-bit slots each).
 */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* doorbell range is 64-bit entries, hence the << 1 */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
/* No pre-ASIC-init work needed on NV; present to satisfy the asic_funcs hook. */
static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}
/*
 * nv_update_umd_stable_pstate - toggle the UMD stable-pstate profile
 * @enter: true when entering stable pstate, false when leaving
 *
 * Enters/exits RLC safe mode, disables perfmon MGCG while in stable
 * pstate (and re-enables it on exit), and on dGPUs with ASPM support
 * toggles ASPM off while the profile is active. Always returns 0.
 */
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm) &&
	    amdgpu_device_should_use_aspm(adev))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}
/* ASIC-level callback table for NV, installed in nv_common_early_init(). */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};
  580. static int nv_common_early_init(void *handle)
  581. {
  582. #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
  583. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  584. if (!amdgpu_sriov_vf(adev)) {
  585. adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
  586. adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
  587. }
  588. adev->smc_rreg = NULL;
  589. adev->smc_wreg = NULL;
  590. adev->pcie_rreg = &nv_pcie_rreg;
  591. adev->pcie_wreg = &nv_pcie_wreg;
  592. adev->pcie_rreg64 = &nv_pcie_rreg64;
  593. adev->pcie_wreg64 = &nv_pcie_wreg64;
  594. adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
  595. adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
  596. /* TODO: will add them during VCN v2 implementation */
  597. adev->uvd_ctx_rreg = NULL;
  598. adev->uvd_ctx_wreg = NULL;
  599. adev->didt_rreg = &nv_didt_rreg;
  600. adev->didt_wreg = &nv_didt_wreg;
  601. adev->asic_funcs = &nv_asic_funcs;
  602. adev->rev_id = nv_get_rev_id(adev);
  603. adev->external_rev_id = 0xff;
  604. /* TODO: split the GC and PG flags based on the relevant IP version for which
  605. * they are relevant.
  606. */
  607. switch (adev->ip_versions[GC_HWIP][0]) {
  608. case IP_VERSION(10, 1, 10):
  609. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  610. AMD_CG_SUPPORT_GFX_CGCG |
  611. AMD_CG_SUPPORT_IH_CG |
  612. AMD_CG_SUPPORT_HDP_MGCG |
  613. AMD_CG_SUPPORT_HDP_LS |
  614. AMD_CG_SUPPORT_SDMA_MGCG |
  615. AMD_CG_SUPPORT_SDMA_LS |
  616. AMD_CG_SUPPORT_MC_MGCG |
  617. AMD_CG_SUPPORT_MC_LS |
  618. AMD_CG_SUPPORT_ATHUB_MGCG |
  619. AMD_CG_SUPPORT_ATHUB_LS |
  620. AMD_CG_SUPPORT_VCN_MGCG |
  621. AMD_CG_SUPPORT_JPEG_MGCG |
  622. AMD_CG_SUPPORT_BIF_MGCG |
  623. AMD_CG_SUPPORT_BIF_LS;
  624. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  625. AMD_PG_SUPPORT_VCN_DPG |
  626. AMD_PG_SUPPORT_JPEG |
  627. AMD_PG_SUPPORT_ATHUB;
  628. adev->external_rev_id = adev->rev_id + 0x1;
  629. break;
  630. case IP_VERSION(10, 1, 1):
  631. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  632. AMD_CG_SUPPORT_GFX_CGCG |
  633. AMD_CG_SUPPORT_IH_CG |
  634. AMD_CG_SUPPORT_HDP_MGCG |
  635. AMD_CG_SUPPORT_HDP_LS |
  636. AMD_CG_SUPPORT_SDMA_MGCG |
  637. AMD_CG_SUPPORT_SDMA_LS |
  638. AMD_CG_SUPPORT_MC_MGCG |
  639. AMD_CG_SUPPORT_MC_LS |
  640. AMD_CG_SUPPORT_ATHUB_MGCG |
  641. AMD_CG_SUPPORT_ATHUB_LS |
  642. AMD_CG_SUPPORT_VCN_MGCG |
  643. AMD_CG_SUPPORT_JPEG_MGCG |
  644. AMD_CG_SUPPORT_BIF_MGCG |
  645. AMD_CG_SUPPORT_BIF_LS;
  646. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  647. AMD_PG_SUPPORT_JPEG |
  648. AMD_PG_SUPPORT_VCN_DPG;
  649. adev->external_rev_id = adev->rev_id + 20;
  650. break;
  651. case IP_VERSION(10, 1, 2):
  652. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  653. AMD_CG_SUPPORT_GFX_MGLS |
  654. AMD_CG_SUPPORT_GFX_CGCG |
  655. AMD_CG_SUPPORT_GFX_CP_LS |
  656. AMD_CG_SUPPORT_GFX_RLC_LS |
  657. AMD_CG_SUPPORT_IH_CG |
  658. AMD_CG_SUPPORT_HDP_MGCG |
  659. AMD_CG_SUPPORT_HDP_LS |
  660. AMD_CG_SUPPORT_SDMA_MGCG |
  661. AMD_CG_SUPPORT_SDMA_LS |
  662. AMD_CG_SUPPORT_MC_MGCG |
  663. AMD_CG_SUPPORT_MC_LS |
  664. AMD_CG_SUPPORT_ATHUB_MGCG |
  665. AMD_CG_SUPPORT_ATHUB_LS |
  666. AMD_CG_SUPPORT_VCN_MGCG |
  667. AMD_CG_SUPPORT_JPEG_MGCG;
  668. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  669. AMD_PG_SUPPORT_VCN_DPG |
  670. AMD_PG_SUPPORT_JPEG |
  671. AMD_PG_SUPPORT_ATHUB;
  672. /* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
  673. * as a consequence, the rev_id and external_rev_id are wrong.
  674. * workaround it by hardcoding rev_id to 0 (default value).
  675. */
  676. if (amdgpu_sriov_vf(adev))
  677. adev->rev_id = 0;
  678. adev->external_rev_id = adev->rev_id + 0xa;
  679. break;
  680. case IP_VERSION(10, 3, 0):
  681. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  682. AMD_CG_SUPPORT_GFX_CGCG |
  683. AMD_CG_SUPPORT_GFX_CGLS |
  684. AMD_CG_SUPPORT_GFX_3D_CGCG |
  685. AMD_CG_SUPPORT_MC_MGCG |
  686. AMD_CG_SUPPORT_VCN_MGCG |
  687. AMD_CG_SUPPORT_JPEG_MGCG |
  688. AMD_CG_SUPPORT_HDP_MGCG |
  689. AMD_CG_SUPPORT_HDP_LS |
  690. AMD_CG_SUPPORT_IH_CG |
  691. AMD_CG_SUPPORT_MC_LS;
  692. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  693. AMD_PG_SUPPORT_VCN_DPG |
  694. AMD_PG_SUPPORT_JPEG |
  695. AMD_PG_SUPPORT_ATHUB |
  696. AMD_PG_SUPPORT_MMHUB;
  697. if (amdgpu_sriov_vf(adev)) {
  698. /* hypervisor control CG and PG enablement */
  699. adev->cg_flags = 0;
  700. adev->pg_flags = 0;
  701. }
  702. adev->external_rev_id = adev->rev_id + 0x28;
  703. break;
  704. case IP_VERSION(10, 3, 2):
  705. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  706. AMD_CG_SUPPORT_GFX_CGCG |
  707. AMD_CG_SUPPORT_GFX_CGLS |
  708. AMD_CG_SUPPORT_GFX_3D_CGCG |
  709. AMD_CG_SUPPORT_VCN_MGCG |
  710. AMD_CG_SUPPORT_JPEG_MGCG |
  711. AMD_CG_SUPPORT_MC_MGCG |
  712. AMD_CG_SUPPORT_MC_LS |
  713. AMD_CG_SUPPORT_HDP_MGCG |
  714. AMD_CG_SUPPORT_HDP_LS |
  715. AMD_CG_SUPPORT_IH_CG;
  716. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  717. AMD_PG_SUPPORT_VCN_DPG |
  718. AMD_PG_SUPPORT_JPEG |
  719. AMD_PG_SUPPORT_ATHUB |
  720. AMD_PG_SUPPORT_MMHUB;
  721. adev->external_rev_id = adev->rev_id + 0x32;
  722. break;
  723. case IP_VERSION(10, 3, 1):
  724. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  725. AMD_CG_SUPPORT_GFX_MGLS |
  726. AMD_CG_SUPPORT_GFX_CP_LS |
  727. AMD_CG_SUPPORT_GFX_RLC_LS |
  728. AMD_CG_SUPPORT_GFX_CGCG |
  729. AMD_CG_SUPPORT_GFX_CGLS |
  730. AMD_CG_SUPPORT_GFX_3D_CGCG |
  731. AMD_CG_SUPPORT_GFX_3D_CGLS |
  732. AMD_CG_SUPPORT_MC_MGCG |
  733. AMD_CG_SUPPORT_MC_LS |
  734. AMD_CG_SUPPORT_GFX_FGCG |
  735. AMD_CG_SUPPORT_VCN_MGCG |
  736. AMD_CG_SUPPORT_SDMA_MGCG |
  737. AMD_CG_SUPPORT_SDMA_LS |
  738. AMD_CG_SUPPORT_JPEG_MGCG;
  739. adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
  740. AMD_PG_SUPPORT_VCN |
  741. AMD_PG_SUPPORT_VCN_DPG |
  742. AMD_PG_SUPPORT_JPEG;
  743. if (adev->apu_flags & AMD_APU_IS_VANGOGH)
  744. adev->external_rev_id = adev->rev_id + 0x01;
  745. break;
  746. case IP_VERSION(10, 3, 4):
  747. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  748. AMD_CG_SUPPORT_GFX_CGCG |
  749. AMD_CG_SUPPORT_GFX_CGLS |
  750. AMD_CG_SUPPORT_GFX_3D_CGCG |
  751. AMD_CG_SUPPORT_VCN_MGCG |
  752. AMD_CG_SUPPORT_JPEG_MGCG |
  753. AMD_CG_SUPPORT_MC_MGCG |
  754. AMD_CG_SUPPORT_MC_LS |
  755. AMD_CG_SUPPORT_HDP_MGCG |
  756. AMD_CG_SUPPORT_HDP_LS |
  757. AMD_CG_SUPPORT_IH_CG;
  758. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  759. AMD_PG_SUPPORT_VCN_DPG |
  760. AMD_PG_SUPPORT_JPEG |
  761. AMD_PG_SUPPORT_ATHUB |
  762. AMD_PG_SUPPORT_MMHUB;
  763. adev->external_rev_id = adev->rev_id + 0x3c;
  764. break;
  765. case IP_VERSION(10, 3, 5):
  766. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  767. AMD_CG_SUPPORT_GFX_CGCG |
  768. AMD_CG_SUPPORT_GFX_CGLS |
  769. AMD_CG_SUPPORT_GFX_3D_CGCG |
  770. AMD_CG_SUPPORT_MC_MGCG |
  771. AMD_CG_SUPPORT_MC_LS |
  772. AMD_CG_SUPPORT_HDP_MGCG |
  773. AMD_CG_SUPPORT_HDP_LS |
  774. AMD_CG_SUPPORT_IH_CG |
  775. AMD_CG_SUPPORT_VCN_MGCG;
  776. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  777. AMD_PG_SUPPORT_VCN_DPG |
  778. AMD_PG_SUPPORT_ATHUB |
  779. AMD_PG_SUPPORT_MMHUB;
  780. adev->external_rev_id = adev->rev_id + 0x46;
  781. break;
  782. case IP_VERSION(10, 3, 3):
  783. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  784. AMD_CG_SUPPORT_GFX_MGLS |
  785. AMD_CG_SUPPORT_GFX_CGCG |
  786. AMD_CG_SUPPORT_GFX_CGLS |
  787. AMD_CG_SUPPORT_GFX_3D_CGCG |
  788. AMD_CG_SUPPORT_GFX_3D_CGLS |
  789. AMD_CG_SUPPORT_GFX_RLC_LS |
  790. AMD_CG_SUPPORT_GFX_CP_LS |
  791. AMD_CG_SUPPORT_GFX_FGCG |
  792. AMD_CG_SUPPORT_MC_MGCG |
  793. AMD_CG_SUPPORT_MC_LS |
  794. AMD_CG_SUPPORT_SDMA_LS |
  795. AMD_CG_SUPPORT_HDP_MGCG |
  796. AMD_CG_SUPPORT_HDP_LS |
  797. AMD_CG_SUPPORT_ATHUB_MGCG |
  798. AMD_CG_SUPPORT_ATHUB_LS |
  799. AMD_CG_SUPPORT_IH_CG |
  800. AMD_CG_SUPPORT_VCN_MGCG |
  801. AMD_CG_SUPPORT_JPEG_MGCG;
  802. adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
  803. AMD_PG_SUPPORT_VCN |
  804. AMD_PG_SUPPORT_VCN_DPG |
  805. AMD_PG_SUPPORT_JPEG;
  806. if (adev->pdev->device == 0x1681)
  807. adev->external_rev_id = 0x20;
  808. else
  809. adev->external_rev_id = adev->rev_id + 0x01;
  810. break;
  811. case IP_VERSION(10, 1, 3):
  812. case IP_VERSION(10, 1, 4):
  813. adev->cg_flags = 0;
  814. adev->pg_flags = 0;
  815. adev->external_rev_id = adev->rev_id + 0x82;
  816. break;
  817. case IP_VERSION(10, 3, 6):
  818. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  819. AMD_CG_SUPPORT_GFX_MGLS |
  820. AMD_CG_SUPPORT_GFX_CGCG |
  821. AMD_CG_SUPPORT_GFX_CGLS |
  822. AMD_CG_SUPPORT_GFX_3D_CGCG |
  823. AMD_CG_SUPPORT_GFX_3D_CGLS |
  824. AMD_CG_SUPPORT_GFX_RLC_LS |
  825. AMD_CG_SUPPORT_GFX_CP_LS |
  826. AMD_CG_SUPPORT_GFX_FGCG |
  827. AMD_CG_SUPPORT_MC_MGCG |
  828. AMD_CG_SUPPORT_MC_LS |
  829. AMD_CG_SUPPORT_SDMA_LS |
  830. AMD_CG_SUPPORT_HDP_MGCG |
  831. AMD_CG_SUPPORT_HDP_LS |
  832. AMD_CG_SUPPORT_ATHUB_MGCG |
  833. AMD_CG_SUPPORT_ATHUB_LS |
  834. AMD_CG_SUPPORT_IH_CG |
  835. AMD_CG_SUPPORT_VCN_MGCG |
  836. AMD_CG_SUPPORT_JPEG_MGCG;
  837. adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
  838. AMD_PG_SUPPORT_VCN |
  839. AMD_PG_SUPPORT_VCN_DPG |
  840. AMD_PG_SUPPORT_JPEG;
  841. adev->external_rev_id = adev->rev_id + 0x01;
  842. break;
  843. case IP_VERSION(10, 3, 7):
  844. adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
  845. AMD_CG_SUPPORT_GFX_MGLS |
  846. AMD_CG_SUPPORT_GFX_CGCG |
  847. AMD_CG_SUPPORT_GFX_CGLS |
  848. AMD_CG_SUPPORT_GFX_3D_CGCG |
  849. AMD_CG_SUPPORT_GFX_3D_CGLS |
  850. AMD_CG_SUPPORT_GFX_RLC_LS |
  851. AMD_CG_SUPPORT_GFX_CP_LS |
  852. AMD_CG_SUPPORT_GFX_FGCG |
  853. AMD_CG_SUPPORT_MC_MGCG |
  854. AMD_CG_SUPPORT_MC_LS |
  855. AMD_CG_SUPPORT_SDMA_LS |
  856. AMD_CG_SUPPORT_HDP_MGCG |
  857. AMD_CG_SUPPORT_HDP_LS |
  858. AMD_CG_SUPPORT_ATHUB_MGCG |
  859. AMD_CG_SUPPORT_ATHUB_LS |
  860. AMD_CG_SUPPORT_IH_CG |
  861. AMD_CG_SUPPORT_VCN_MGCG |
  862. AMD_CG_SUPPORT_JPEG_MGCG;
  863. adev->pg_flags = AMD_PG_SUPPORT_VCN |
  864. AMD_PG_SUPPORT_VCN_DPG |
  865. AMD_PG_SUPPORT_JPEG |
  866. AMD_PG_SUPPORT_GFX_PG;
  867. adev->external_rev_id = adev->rev_id + 0x01;
  868. break;
  869. default:
  870. /* FIXME: not supported yet */
  871. return -EINVAL;
  872. }
  873. if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
  874. adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
  875. AMD_PG_SUPPORT_VCN_DPG |
  876. AMD_PG_SUPPORT_JPEG);
  877. if (amdgpu_sriov_vf(adev)) {
  878. amdgpu_virt_init_setting(adev);
  879. xgpu_nv_mailbox_set_irq_funcs(adev);
  880. }
  881. return 0;
  882. }
  883. static int nv_common_late_init(void *handle)
  884. {
  885. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  886. if (amdgpu_sriov_vf(adev)) {
  887. xgpu_nv_mailbox_get_irq(adev);
  888. amdgpu_virt_update_sriov_video_codec(adev,
  889. sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
  890. sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
  891. }
  892. return 0;
  893. }
  894. static int nv_common_sw_init(void *handle)
  895. {
  896. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  897. if (amdgpu_sriov_vf(adev))
  898. xgpu_nv_mailbox_add_irq_id(adev);
  899. return 0;
  900. }
  901. static int nv_common_sw_fini(void *handle)
  902. {
  903. return 0;
  904. }
/*
 * nv_common_hw_init - program the common (non-IP-specific) hardware state.
 *
 * The sequence below is order-dependent: PCIe workarounds are applied
 * before link-speed/ASPM programming, and NBIO registers are set up
 * before the HDP remap and doorbell aperture are enabled.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Optional NBIO PCIe workarounds; only present on some NBIO versions. */
	if (adev->nbio.funcs->apply_lc_spc_mode_wa)
		adev->nbio.funcs->apply_lc_spc_mode_wa(adev);

	if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa)
		adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev);

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
  928. static int nv_common_hw_fini(void *handle)
  929. {
  930. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  931. /* disable the doorbell aperture */
  932. nv_enable_doorbell_aperture(adev, false);
  933. return 0;
  934. }
  935. static int nv_common_suspend(void *handle)
  936. {
  937. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  938. return nv_common_hw_fini(adev);
  939. }
  940. static int nv_common_resume(void *handle)
  941. {
  942. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  943. return nv_common_hw_init(adev);
  944. }
  945. static bool nv_common_is_idle(void *handle)
  946. {
  947. return true;
  948. }
  949. static int nv_common_wait_for_idle(void *handle)
  950. {
  951. return 0;
  952. }
  953. static int nv_common_soft_reset(void *handle)
  954. {
  955. return 0;
  956. }
  957. static int nv_common_set_clockgating_state(void *handle,
  958. enum amd_clockgating_state state)
  959. {
  960. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  961. if (amdgpu_sriov_vf(adev))
  962. return 0;
  963. switch (adev->ip_versions[NBIO_HWIP][0]) {
  964. case IP_VERSION(2, 3, 0):
  965. case IP_VERSION(2, 3, 1):
  966. case IP_VERSION(2, 3, 2):
  967. case IP_VERSION(3, 3, 0):
  968. case IP_VERSION(3, 3, 1):
  969. case IP_VERSION(3, 3, 2):
  970. case IP_VERSION(3, 3, 3):
  971. adev->nbio.funcs->update_medium_grain_clock_gating(adev,
  972. state == AMD_CG_STATE_GATE);
  973. adev->nbio.funcs->update_medium_grain_light_sleep(adev,
  974. state == AMD_CG_STATE_GATE);
  975. adev->hdp.funcs->update_clock_gating(adev,
  976. state == AMD_CG_STATE_GATE);
  977. adev->smuio.funcs->update_rom_clock_gating(adev,
  978. state == AMD_CG_STATE_GATE);
  979. break;
  980. default:
  981. break;
  982. }
  983. return 0;
  984. }
  985. static int nv_common_set_powergating_state(void *handle,
  986. enum amd_powergating_state state)
  987. {
  988. /* TODO */
  989. return 0;
  990. }
  991. static void nv_common_get_clockgating_state(void *handle, u64 *flags)
  992. {
  993. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  994. if (amdgpu_sriov_vf(adev))
  995. *flags = 0;
  996. adev->nbio.funcs->get_clockgating_state(adev, flags);
  997. adev->hdp.funcs->get_clock_gating_state(adev, flags);
  998. adev->smuio.funcs->get_clock_gating_state(adev, flags);
  999. return;
  1000. }
/* Callback table wiring the common IP block into the amdgpu IP framework. */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	/* lifecycle: init/teardown of software and hardware state */
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	/* power transitions */
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	/* idle/reset handling (all stubs for this block) */
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	/* clock- and power-gating control */
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};