usb4.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * USB4 specific functionality
  4. *
  5. * Copyright (C) 2019, Intel Corporation
  6. * Authors: Mika Westerberg <[email protected]>
  7. * Rajmohan Mani <[email protected]>
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/ktime.h>
  11. #include "sb_regs.h"
  12. #include "tb.h"
  13. #define USB4_DATA_RETRIES 3
  14. enum usb4_sb_target {
  15. USB4_SB_TARGET_ROUTER,
  16. USB4_SB_TARGET_PARTNER,
  17. USB4_SB_TARGET_RETIMER,
  18. };
  19. #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
  20. #define USB4_NVM_READ_OFFSET_SHIFT 2
  21. #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
  22. #define USB4_NVM_READ_LENGTH_SHIFT 24
  23. #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
  24. #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
  25. #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
  26. #define USB4_DROM_ADDRESS_SHIFT 2
  27. #define USB4_DROM_SIZE_MASK GENMASK(19, 15)
  28. #define USB4_DROM_SIZE_SHIFT 15
  29. #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
  30. #define USB4_BA_LENGTH_MASK GENMASK(7, 0)
  31. #define USB4_BA_INDEX_MASK GENMASK(15, 0)
  32. enum usb4_ba_index {
  33. USB4_BA_MAX_USB3 = 0x1,
  34. USB4_BA_MIN_DP_AUX = 0x2,
  35. USB4_BA_MIN_DP_MAIN = 0x3,
  36. USB4_BA_MAX_PCIE = 0x4,
  37. USB4_BA_MAX_HI = 0x5,
  38. };
  39. #define USB4_BA_VALUE_MASK GENMASK(31, 16)
  40. #define USB4_BA_VALUE_SHIFT 16
  41. static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
  42. u32 *metadata, u8 *status,
  43. const void *tx_data, size_t tx_dwords,
  44. void *rx_data, size_t rx_dwords)
  45. {
  46. u32 val;
  47. int ret;
  48. if (metadata) {
  49. ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
  50. if (ret)
  51. return ret;
  52. }
  53. if (tx_dwords) {
  54. ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
  55. tx_dwords);
  56. if (ret)
  57. return ret;
  58. }
  59. val = opcode | ROUTER_CS_26_OV;
  60. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
  61. if (ret)
  62. return ret;
  63. ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
  64. if (ret)
  65. return ret;
  66. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
  67. if (ret)
  68. return ret;
  69. if (val & ROUTER_CS_26_ONS)
  70. return -EOPNOTSUPP;
  71. if (status)
  72. *status = (val & ROUTER_CS_26_STATUS_MASK) >>
  73. ROUTER_CS_26_STATUS_SHIFT;
  74. if (metadata) {
  75. ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
  76. if (ret)
  77. return ret;
  78. }
  79. if (rx_dwords) {
  80. ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
  81. rx_dwords);
  82. if (ret)
  83. return ret;
  84. }
  85. return 0;
  86. }
  87. static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
  88. u8 *status, const void *tx_data, size_t tx_dwords,
  89. void *rx_data, size_t rx_dwords)
  90. {
  91. const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
  92. if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
  93. return -EINVAL;
  94. /*
  95. * If the connection manager implementation provides USB4 router
  96. * operation proxy callback, call it here instead of running the
  97. * operation natively.
  98. */
  99. if (cm_ops->usb4_switch_op) {
  100. int ret;
  101. ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
  102. tx_data, tx_dwords, rx_data,
  103. rx_dwords);
  104. if (ret != -EOPNOTSUPP)
  105. return ret;
  106. /*
  107. * If the proxy was not supported then run the native
  108. * router operation instead.
  109. */
  110. }
  111. return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
  112. tx_dwords, rx_data, rx_dwords);
  113. }
  114. static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
  115. u32 *metadata, u8 *status)
  116. {
  117. return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
  118. }
  119. static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
  120. u32 *metadata, u8 *status,
  121. const void *tx_data, size_t tx_dwords,
  122. void *rx_data, size_t rx_dwords)
  123. {
  124. return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
  125. tx_dwords, rx_data, rx_dwords);
  126. }
  127. static void usb4_switch_check_wakes(struct tb_switch *sw)
  128. {
  129. struct tb_port *port;
  130. bool wakeup = false;
  131. u32 val;
  132. if (!device_may_wakeup(&sw->dev))
  133. return;
  134. if (tb_route(sw)) {
  135. if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
  136. return;
  137. tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
  138. (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
  139. (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
  140. wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
  141. }
  142. /* Check for any connected downstream ports for USB4 wake */
  143. tb_switch_for_each_port(sw, port) {
  144. if (!tb_port_has_remote(port))
  145. continue;
  146. if (tb_port_read(port, &val, TB_CFG_PORT,
  147. port->cap_usb4 + PORT_CS_18, 1))
  148. break;
  149. tb_port_dbg(port, "USB4 wake: %s\n",
  150. (val & PORT_CS_18_WOU4S) ? "yes" : "no");
  151. if (val & PORT_CS_18_WOU4S)
  152. wakeup = true;
  153. }
  154. if (wakeup)
  155. pm_wakeup_event(&sw->dev, 0);
  156. }
  157. static bool link_is_usb4(struct tb_port *port)
  158. {
  159. u32 val;
  160. if (!port->cap_usb4)
  161. return false;
  162. if (tb_port_read(port, &val, TB_CFG_PORT,
  163. port->cap_usb4 + PORT_CS_18, 1))
  164. return false;
  165. return !(val & PORT_CS_18_TCM);
  166. }
  167. /**
  168. * usb4_switch_setup() - Additional setup for USB4 device
  169. * @sw: USB4 router to setup
  170. *
  171. * USB4 routers need additional settings in order to enable all the
  172. * tunneling. This function enables USB and PCIe tunneling if it can be
  173. * enabled (e.g the parent switch also supports them). If USB tunneling
  174. * is not available for some reason (like that there is Thunderbolt 3
  175. * switch upstream) then the internal xHCI controller is enabled
  176. * instead.
  177. */
  178. int usb4_switch_setup(struct tb_switch *sw)
  179. {
  180. struct tb_port *downstream_port;
  181. struct tb_switch *parent;
  182. bool tbt3, xhci;
  183. u32 val = 0;
  184. int ret;
  185. usb4_switch_check_wakes(sw);
  186. if (!tb_route(sw))
  187. return 0;
  188. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
  189. if (ret)
  190. return ret;
  191. parent = tb_switch_parent(sw);
  192. downstream_port = tb_port_at(tb_route(sw), parent);
  193. sw->link_usb4 = link_is_usb4(downstream_port);
  194. tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
  195. xhci = val & ROUTER_CS_6_HCI;
  196. tbt3 = !(val & ROUTER_CS_6_TNS);
  197. tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
  198. tbt3 ? "yes" : "no", xhci ? "yes" : "no");
  199. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  200. if (ret)
  201. return ret;
  202. if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
  203. tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
  204. val |= ROUTER_CS_5_UTO;
  205. xhci = false;
  206. }
  207. /*
  208. * Only enable PCIe tunneling if the parent router supports it
  209. * and it is not disabled.
  210. */
  211. if (tb_acpi_may_tunnel_pcie() &&
  212. tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
  213. val |= ROUTER_CS_5_PTO;
  214. /*
  215. * xHCI can be enabled if PCIe tunneling is supported
  216. * and the parent does not have any USB3 dowstream
  217. * adapters (so we cannot do USB 3.x tunneling).
  218. */
  219. if (xhci)
  220. val |= ROUTER_CS_5_HCO;
  221. }
  222. /* TBT3 supported by the CM */
  223. val |= ROUTER_CS_5_C3S;
  224. /* Tunneling configuration is ready now */
  225. val |= ROUTER_CS_5_CV;
  226. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  227. if (ret)
  228. return ret;
  229. return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
  230. ROUTER_CS_6_CR, 50);
  231. }
  232. /**
  233. * usb4_switch_read_uid() - Read UID from USB4 router
  234. * @sw: USB4 router
  235. * @uid: UID is stored here
  236. *
  237. * Reads 64-bit UID from USB4 router config space.
  238. */
  239. int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
  240. {
  241. return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
  242. }
  243. static int usb4_switch_drom_read_block(void *data,
  244. unsigned int dwaddress, void *buf,
  245. size_t dwords)
  246. {
  247. struct tb_switch *sw = data;
  248. u8 status = 0;
  249. u32 metadata;
  250. int ret;
  251. metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
  252. metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
  253. USB4_DROM_ADDRESS_MASK;
  254. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
  255. &status, NULL, 0, buf, dwords);
  256. if (ret)
  257. return ret;
  258. return status ? -EIO : 0;
  259. }
  260. /**
  261. * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
  262. * @sw: USB4 router
  263. * @address: Byte address inside DROM to start reading
  264. * @buf: Buffer where the DROM content is stored
  265. * @size: Number of bytes to read from DROM
  266. *
  267. * Uses USB4 router operations to read router DROM. For devices this
  268. * should always work but for hosts it may return %-EOPNOTSUPP in which
  269. * case the host router does not have DROM.
  270. */
  271. int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
  272. size_t size)
  273. {
  274. return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
  275. usb4_switch_drom_read_block, sw);
  276. }
  277. /**
  278. * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
  279. * @sw: USB4 router
  280. *
  281. * Checks whether conditions are met so that lane bonding can be
  282. * established with the upstream router. Call only for device routers.
  283. */
  284. bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
  285. {
  286. struct tb_port *up;
  287. int ret;
  288. u32 val;
  289. up = tb_upstream_port(sw);
  290. ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
  291. if (ret)
  292. return false;
  293. return !!(val & PORT_CS_18_BE);
  294. }
  295. /**
  296. * usb4_switch_set_wake() - Enabled/disable wake
  297. * @sw: USB4 router
  298. * @flags: Wakeup flags (%0 to disable)
  299. *
  300. * Enables/disables router to wake up from sleep.
  301. */
  302. int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
  303. {
  304. struct tb_port *port;
  305. u64 route = tb_route(sw);
  306. u32 val;
  307. int ret;
  308. /*
  309. * Enable wakes coming from all USB4 downstream ports (from
  310. * child routers). For device routers do this also for the
  311. * upstream USB4 port.
  312. */
  313. tb_switch_for_each_port(sw, port) {
  314. if (!tb_port_is_null(port))
  315. continue;
  316. if (!route && tb_is_upstream_port(port))
  317. continue;
  318. if (!port->cap_usb4)
  319. continue;
  320. ret = tb_port_read(port, &val, TB_CFG_PORT,
  321. port->cap_usb4 + PORT_CS_19, 1);
  322. if (ret)
  323. return ret;
  324. val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
  325. if (tb_is_upstream_port(port)) {
  326. val |= PORT_CS_19_WOU4;
  327. } else {
  328. bool configured = val & PORT_CS_19_PC;
  329. if ((flags & TB_WAKE_ON_CONNECT) && !configured)
  330. val |= PORT_CS_19_WOC;
  331. if ((flags & TB_WAKE_ON_DISCONNECT) && configured)
  332. val |= PORT_CS_19_WOD;
  333. if ((flags & TB_WAKE_ON_USB4) && configured)
  334. val |= PORT_CS_19_WOU4;
  335. }
  336. ret = tb_port_write(port, &val, TB_CFG_PORT,
  337. port->cap_usb4 + PORT_CS_19, 1);
  338. if (ret)
  339. return ret;
  340. }
  341. /*
  342. * Enable wakes from PCIe, USB 3.x and DP on this router. Only
  343. * needed for device routers.
  344. */
  345. if (route) {
  346. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  347. if (ret)
  348. return ret;
  349. val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
  350. if (flags & TB_WAKE_ON_USB3)
  351. val |= ROUTER_CS_5_WOU;
  352. if (flags & TB_WAKE_ON_PCIE)
  353. val |= ROUTER_CS_5_WOP;
  354. if (flags & TB_WAKE_ON_DP)
  355. val |= ROUTER_CS_5_WOD;
  356. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  357. if (ret)
  358. return ret;
  359. }
  360. return 0;
  361. }
  362. /**
  363. * usb4_switch_set_sleep() - Prepare the router to enter sleep
  364. * @sw: USB4 router
  365. *
  366. * Sets sleep bit for the router. Returns when the router sleep ready
  367. * bit has been asserted.
  368. */
  369. int usb4_switch_set_sleep(struct tb_switch *sw)
  370. {
  371. int ret;
  372. u32 val;
  373. /* Set sleep bit and wait for sleep ready to be asserted */
  374. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  375. if (ret)
  376. return ret;
  377. val |= ROUTER_CS_5_SLP;
  378. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  379. if (ret)
  380. return ret;
  381. return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
  382. ROUTER_CS_6_SLPR, 500);
  383. }
  384. /**
  385. * usb4_switch_nvm_sector_size() - Return router NVM sector size
  386. * @sw: USB4 router
  387. *
  388. * If the router supports NVM operations this function returns the NVM
  389. * sector size in bytes. If NVM operations are not supported returns
  390. * %-EOPNOTSUPP.
  391. */
  392. int usb4_switch_nvm_sector_size(struct tb_switch *sw)
  393. {
  394. u32 metadata;
  395. u8 status;
  396. int ret;
  397. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
  398. &status);
  399. if (ret)
  400. return ret;
  401. if (status)
  402. return status == 0x2 ? -EOPNOTSUPP : -EIO;
  403. return metadata & USB4_NVM_SECTOR_SIZE_MASK;
  404. }
  405. static int usb4_switch_nvm_read_block(void *data,
  406. unsigned int dwaddress, void *buf, size_t dwords)
  407. {
  408. struct tb_switch *sw = data;
  409. u8 status = 0;
  410. u32 metadata;
  411. int ret;
  412. metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
  413. USB4_NVM_READ_LENGTH_MASK;
  414. metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
  415. USB4_NVM_READ_OFFSET_MASK;
  416. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
  417. &status, NULL, 0, buf, dwords);
  418. if (ret)
  419. return ret;
  420. return status ? -EIO : 0;
  421. }
  422. /**
  423. * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
  424. * @sw: USB4 router
  425. * @address: Starting address in bytes
  426. * @buf: Read data is placed here
  427. * @size: How many bytes to read
  428. *
  429. * Reads NVM contents of the router. If NVM is not supported returns
  430. * %-EOPNOTSUPP.
  431. */
  432. int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
  433. size_t size)
  434. {
  435. return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
  436. usb4_switch_nvm_read_block, sw);
  437. }
  438. /**
  439. * usb4_switch_nvm_set_offset() - Set NVM write offset
  440. * @sw: USB4 router
  441. * @address: Start offset
  442. *
  443. * Explicitly sets NVM write offset. Normally when writing to NVM this
  444. * is done automatically by usb4_switch_nvm_write().
  445. *
  446. * Returns %0 in success and negative errno if there was a failure.
  447. */
  448. int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
  449. {
  450. u32 metadata, dwaddress;
  451. u8 status = 0;
  452. int ret;
  453. dwaddress = address / 4;
  454. metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
  455. USB4_NVM_SET_OFFSET_MASK;
  456. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
  457. &status);
  458. if (ret)
  459. return ret;
  460. return status ? -EIO : 0;
  461. }
  462. static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
  463. const void *buf, size_t dwords)
  464. {
  465. struct tb_switch *sw = data;
  466. u8 status;
  467. int ret;
  468. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
  469. buf, dwords, NULL, 0);
  470. if (ret)
  471. return ret;
  472. return status ? -EIO : 0;
  473. }
  474. /**
  475. * usb4_switch_nvm_write() - Write to the router NVM
  476. * @sw: USB4 router
  477. * @address: Start address where to write in bytes
  478. * @buf: Pointer to the data to write
  479. * @size: Size of @buf in bytes
  480. *
  481. * Writes @buf to the router NVM using USB4 router operations. If NVM
  482. * write is not supported returns %-EOPNOTSUPP.
  483. */
  484. int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
  485. const void *buf, size_t size)
  486. {
  487. int ret;
  488. ret = usb4_switch_nvm_set_offset(sw, address);
  489. if (ret)
  490. return ret;
  491. return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
  492. usb4_switch_nvm_write_next_block, sw);
  493. }
  494. /**
  495. * usb4_switch_nvm_authenticate() - Authenticate new NVM
  496. * @sw: USB4 router
  497. *
  498. * After the new NVM has been written via usb4_switch_nvm_write(), this
  499. * function triggers NVM authentication process. The router gets power
  500. * cycled and if the authentication is successful the new NVM starts
  501. * running. In case of failure returns negative errno.
  502. *
  503. * The caller should call usb4_switch_nvm_authenticate_status() to read
  504. * the status of the authentication after power cycle. It should be the
  505. * first router operation to avoid the status being lost.
  506. */
  507. int usb4_switch_nvm_authenticate(struct tb_switch *sw)
  508. {
  509. int ret;
  510. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
  511. switch (ret) {
  512. /*
  513. * The router is power cycled once NVM_AUTH is started so it is
  514. * expected to get any of the following errors back.
  515. */
  516. case -EACCES:
  517. case -ENOTCONN:
  518. case -ETIMEDOUT:
  519. return 0;
  520. default:
  521. return ret;
  522. }
  523. }
  524. /**
  525. * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
  526. * @sw: USB4 router
  527. * @status: Status code of the operation
  528. *
  529. * The function checks if there is status available from the last NVM
  530. * authenticate router operation. If there is status then %0 is returned
  531. * and the status code is placed in @status. Returns negative errno in case
  532. * of failure.
  533. *
  534. * Must be called before any other router operation.
  535. */
  536. int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
  537. {
  538. const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
  539. u16 opcode;
  540. u32 val;
  541. int ret;
  542. if (cm_ops->usb4_switch_nvm_authenticate_status) {
  543. ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
  544. if (ret != -EOPNOTSUPP)
  545. return ret;
  546. }
  547. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
  548. if (ret)
  549. return ret;
  550. /* Check that the opcode is correct */
  551. opcode = val & ROUTER_CS_26_OPCODE_MASK;
  552. if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
  553. if (val & ROUTER_CS_26_OV)
  554. return -EBUSY;
  555. if (val & ROUTER_CS_26_ONS)
  556. return -EOPNOTSUPP;
  557. *status = (val & ROUTER_CS_26_STATUS_MASK) >>
  558. ROUTER_CS_26_STATUS_SHIFT;
  559. } else {
  560. *status = 0;
  561. }
  562. return 0;
  563. }
  564. /**
  565. * usb4_switch_credits_init() - Read buffer allocation parameters
  566. * @sw: USB4 router
  567. *
  568. * Reads @sw buffer allocation parameters and initializes @sw buffer
  569. * allocation fields accordingly. Specifically @sw->credits_allocation
  570. * is set to %true if these parameters can be used in tunneling.
  571. *
  572. * Returns %0 on success and negative errno otherwise.
  573. */
  574. int usb4_switch_credits_init(struct tb_switch *sw)
  575. {
  576. int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
  577. int ret, length, i, nports;
  578. const struct tb_port *port;
  579. u32 data[NVM_DATA_DWORDS];
  580. u32 metadata = 0;
  581. u8 status = 0;
  582. memset(data, 0, sizeof(data));
  583. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
  584. &status, NULL, 0, data, ARRAY_SIZE(data));
  585. if (ret)
  586. return ret;
  587. if (status)
  588. return -EIO;
  589. length = metadata & USB4_BA_LENGTH_MASK;
  590. if (WARN_ON(length > ARRAY_SIZE(data)))
  591. return -EMSGSIZE;
  592. max_usb3 = -1;
  593. min_dp_aux = -1;
  594. min_dp_main = -1;
  595. max_pcie = -1;
  596. max_dma = -1;
  597. tb_sw_dbg(sw, "credit allocation parameters:\n");
  598. for (i = 0; i < length; i++) {
  599. u16 index, value;
  600. index = data[i] & USB4_BA_INDEX_MASK;
  601. value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
  602. switch (index) {
  603. case USB4_BA_MAX_USB3:
  604. tb_sw_dbg(sw, " USB3: %u\n", value);
  605. max_usb3 = value;
  606. break;
  607. case USB4_BA_MIN_DP_AUX:
  608. tb_sw_dbg(sw, " DP AUX: %u\n", value);
  609. min_dp_aux = value;
  610. break;
  611. case USB4_BA_MIN_DP_MAIN:
  612. tb_sw_dbg(sw, " DP main: %u\n", value);
  613. min_dp_main = value;
  614. break;
  615. case USB4_BA_MAX_PCIE:
  616. tb_sw_dbg(sw, " PCIe: %u\n", value);
  617. max_pcie = value;
  618. break;
  619. case USB4_BA_MAX_HI:
  620. tb_sw_dbg(sw, " DMA: %u\n", value);
  621. max_dma = value;
  622. break;
  623. default:
  624. tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
  625. index);
  626. break;
  627. }
  628. }
  629. /*
  630. * Validate the buffer allocation preferences. If we find
  631. * issues, log a warning and fall back using the hard-coded
  632. * values.
  633. */
  634. /* Host router must report baMaxHI */
  635. if (!tb_route(sw) && max_dma < 0) {
  636. tb_sw_warn(sw, "host router is missing baMaxHI\n");
  637. goto err_invalid;
  638. }
  639. nports = 0;
  640. tb_switch_for_each_port(sw, port) {
  641. if (tb_port_is_null(port))
  642. nports++;
  643. }
  644. /* Must have DP buffer allocation (multiple USB4 ports) */
  645. if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
  646. tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
  647. goto err_invalid;
  648. }
  649. tb_switch_for_each_port(sw, port) {
  650. if (tb_port_is_dpout(port) && min_dp_main < 0) {
  651. tb_sw_warn(sw, "missing baMinDPmain");
  652. goto err_invalid;
  653. }
  654. if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
  655. min_dp_aux < 0) {
  656. tb_sw_warn(sw, "missing baMinDPaux");
  657. goto err_invalid;
  658. }
  659. if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
  660. max_usb3 < 0) {
  661. tb_sw_warn(sw, "missing baMaxUSB3");
  662. goto err_invalid;
  663. }
  664. if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
  665. max_pcie < 0) {
  666. tb_sw_warn(sw, "missing baMaxPCIe");
  667. goto err_invalid;
  668. }
  669. }
  670. /*
  671. * Buffer allocation passed the validation so we can use it in
  672. * path creation.
  673. */
  674. sw->credit_allocation = true;
  675. if (max_usb3 > 0)
  676. sw->max_usb3_credits = max_usb3;
  677. if (min_dp_aux > 0)
  678. sw->min_dp_aux_credits = min_dp_aux;
  679. if (min_dp_main > 0)
  680. sw->min_dp_main_credits = min_dp_main;
  681. if (max_pcie > 0)
  682. sw->max_pcie_credits = max_pcie;
  683. if (max_dma > 0)
  684. sw->max_dma_credits = max_dma;
  685. return 0;
  686. err_invalid:
  687. return -EINVAL;
  688. }
/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;	/* DP IN adapter number goes into metadata */
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
			     &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	/* Zero completion status means the resource is available */
	return !status;
}
/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;	/* DP IN adapter number goes into metadata */
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
			     &status);
	/* Routers without DP resource allocation support: treat as success */
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	/* Non-zero completion status means the resource is already taken */
	return status ? -EBUSY : 0;
}
  738. /**
  739. * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
  740. * @sw: USB4 router
  741. * @in: DP IN adapter
  742. *
  743. * Releases the previously allocated DP IN resource.
  744. */
  745. int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  746. {
  747. u32 metadata = in->port;
  748. u8 status;
  749. int ret;
  750. ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
  751. &status);
  752. if (ret == -EOPNOTSUPP)
  753. return 0;
  754. else if (ret)
  755. return ret;
  756. return status ? -EIO : 0;
  757. }
  758. static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
  759. {
  760. struct tb_port *p;
  761. int usb4_idx = 0;
  762. /* Assume port is primary */
  763. tb_switch_for_each_port(sw, p) {
  764. if (!tb_port_is_null(p))
  765. continue;
  766. if (tb_is_upstream_port(p))
  767. continue;
  768. if (!p->link_nr) {
  769. if (p == port)
  770. break;
  771. usb4_idx++;
  772. }
  773. }
  774. return usb4_idx;
  775. }
  776. /**
  777. * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
  778. * @sw: USB4 router
  779. * @port: USB4 port
  780. *
  781. * USB4 routers have direct mapping between USB4 ports and PCIe
  782. * downstream adapters where the PCIe topology is extended. This
  783. * function returns the corresponding downstream PCIe adapter or %NULL
  784. * if no such mapping was possible.
  785. */
  786. struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
  787. const struct tb_port *port)
  788. {
  789. int usb4_idx = usb4_port_idx(sw, port);
  790. struct tb_port *p;
  791. int pcie_idx = 0;
  792. /* Find PCIe down port matching usb4_port */
  793. tb_switch_for_each_port(sw, p) {
  794. if (!tb_port_is_pcie_down(p))
  795. continue;
  796. if (pcie_idx == usb4_idx)
  797. return p;
  798. pcie_idx++;
  799. }
  800. return NULL;
  801. }
/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;
		/* The n:th USB4 port maps to the n:th USB3 downstream adapter */
		if (usb_idx == usb4_idx)
			return p;
		usb_idx++;
	}

	return NULL;
}
/**
 * usb4_switch_add_ports() - Add USB4 ports for this router
 * @sw: USB4 router
 *
 * For USB4 router finds all USB4 ports and registers devices for each.
 * Can be called for any router.
 *
 * Return %0 in case of success and negative errno in case of failure.
 */
int usb4_switch_add_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	/* ICM-managed or non-USB4 routers have no USB4 port devices */
	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		struct usb4_port *usb4;

		/* Only lane adapters with a USB4 capability qualify */
		if (!tb_port_is_null(port))
			continue;
		if (!port->cap_usb4)
			continue;

		usb4 = usb4_port_device_add(port);
		if (IS_ERR(usb4)) {
			/* Roll back the ports registered so far */
			usb4_switch_remove_ports(sw);
			return PTR_ERR(usb4);
		}

		port->usb4 = usb4;
	}

	return 0;
}
  857. /**
  858. * usb4_switch_remove_ports() - Removes USB4 ports from this router
  859. * @sw: USB4 router
  860. *
  861. * Unregisters previously registered USB4 ports.
  862. */
  863. void usb4_switch_remove_ports(struct tb_switch *sw)
  864. {
  865. struct tb_port *port;
  866. tb_switch_for_each_port(sw, port) {
  867. if (port->usb4) {
  868. usb4_port_device_remove(port->usb4);
  869. port->usb4 = NULL;
  870. }
  871. }
  872. }
/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	/* Clearing the lock (LCK) bit opens up downstream access */
	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
/**
 * usb4_port_hotplug_enable() - Enables hotplug for a port
 * @port: USB4 port to operate on
 *
 * Enables hot plug events on a given port. This is only intended
 * to be used on lane, DP-IN, and DP-OUT adapters.
 */
int usb4_port_hotplug_enable(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	/* Clearing the DHP (disable hot plug) bit enables the events */
	val &= ~ADP_CS_5_DHP;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
/* Sets or clears the port configured (PC) bit of the USB4 port */
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	/* PORT_CS_19 only exists behind the USB4 port capability */
	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}
/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}
/* Sets or clears the inter-domain (PID) bit of the USB4 port */
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	/* PORT_CS_19 only exists behind the USB4 port capability */
	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 * @xd: XDomain that is connected to the port
 *
 * Marks the USB4 port as being connected to another host and updates
 * the link type. Returns %0 in success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	/* Record whether the link to the other domain is USB4 */
	xd->link_usb4 = link_is_usb4(port);
	return usb4_set_xdomain_configured(port, true);
}
/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}
/*
 * Polls the port register at @offset until (val & @bit) == @value or
 * @timeout_msec elapses. Returns %0 on match, %-ETIMEDOUT on timeout
 * and negative errno if the register read fails.
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		/* Give the hardware a moment before polling again */
		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
/* Reads up to NVM_DATA_DWORDS dwords of sideband data starting at PORT_CS_2 */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}
/* Writes up to NVM_DATA_DWORDS dwords of sideband data starting at PORT_CS_2 */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}
/*
 * Performs one sideband register read from @target (the router itself or
 * a retimer at @index). The request is programmed into PORT_CS_1; when
 * @buf is non-NULL the returned data is read back from the data
 * registers. Returns %-ENODEV if the target did not respond and %-EIO
 * if it rejected the access.
 */
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the sideband access request word */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;	/* mark the request pending */

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Hardware clears the pending bit when the access completes */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* No response from the target */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	/* Target completed the access with an error */
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
/*
 * Performs one sideband register write to @target (the router itself or
 * a retimer at @index). When @buf is non-NULL the payload is first
 * copied to the data registers, then the request is programmed into
 * PORT_CS_1. Returns %-ENODEV if the target did not respond and %-EIO
 * if it rejected the access.
 */
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Stage the write payload before issuing the request */
	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	/* Build the sideband access request word */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;	/* mark the request pending */

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Hardware clears the pending bit when the access completes */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* No response from the target */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	/* Target completed the access with an error */
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
/*
 * Runs a sideband operation: writes @opcode to the target's opcode
 * register and then polls the same register until the target reports
 * completion (zero), an error, or @timeout_msec elapses.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			/* Operation completed successfully */
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			/* Operation not supported by the target */
			return -EOPNOTSUPP;

		default:
			/*
			 * Anything other than the original opcode still
			 * pending means the target misbehaved.
			 */
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
/*
 * Issues the router offline/online sideband request. The opcode is
 * written raw (no completion polling) since the port state changes as
 * a result.
 */
static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;	/* metadata: 1 = back online, 0 = offline */
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react on hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 link is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}
/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}
/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send broadcast RT transaction which
 * makes the retimers on the link to assign index to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	/* Raw opcode write; there is no per-retimer completion to poll */
	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
/**
 * usb4_port_clx_supported() - Check if CLx is supported by the link
 * @port: Port to check for CLx support for
 *
 * PORT_CS_18_CPS bit reflects if the link supports CLx including
 * active cables (if connected on the link).
 */
bool usb4_port_clx_supported(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		/* Read failure is treated as "not supported" */
		return false;

	return !!(val & PORT_CS_18_CPS);
}
/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @caps: Array with at least two elements to hold the results
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 */
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
	if (ret)
		return ret;

	/* Two dwords of capability data come back in the data registers */
	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, caps, sizeof(*caps) * 2);
}
/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @ber_level: BER level contour value
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results)
{
	u32 val;
	int ret;

	/* Encode the margining parameters into the metadata word */
	val = lanes;
	if (timing)
		val |= USB4_MARGIN_HW_TIME;
	if (right_high)
		val |= USB4_MARGIN_HW_RH;
	if (ber_level)
		val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
			USB4_MARGIN_HW_BER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	/* Margining takes a while, hence the longer 2500 ms timeout */
	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, results, sizeof(*results) * 2);
}
/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use Right/high margin instead of left/low
 * @counter: What to do with the error counter
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter)
{
	u32 val;
	int ret;

	/* Encode the margining parameters into the metadata word */
	val = lanes;
	if (timing)
		val |= USB4_MARGIN_SW_TIME;
	if (right_high)
		val |= USB4_MARGIN_SW_RH;
	val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
		USB4_MARGIN_SW_COUNTER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	/* Margining takes a while, hence the longer 2500 ms timeout */
	return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			       USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}
/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
	if (ret)
		return ret;

	/* The counters come back in the metadata register */
	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, errors, sizeof(*errors));
}
/* Convenience wrapper running a sideband operation on a retimer target */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}
/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);
	/* Only retry on "no response"; any other result is final */
	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}
/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}
/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}
/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}
/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	/* Bit 0 of the returned metadata carries the answer */
	return ret ? ret : metadata & 1;
}
/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	/* Sector size is in the low bits of the returned metadata */
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	/* The offset is communicated in dwords, not bytes */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}
/* Context passed to the retimer NVM block read/write callbacks */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer is behind */
	u8 index;		/* retimer index on that port */
};
/*
 * tb_nvm_write_data() callback: stages one block of NVM data into the
 * sideband data registers and triggers the block write operation.
 */
static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	/* Program the starting offset before streaming the blocks */
	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}
/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	/* The opcode register tells how the authentication ended */
	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	switch (val) {
	case 0:
		/* Authentication succeeded */
		*status = 0;
		return 0;

	case USB4_SB_OPCODE_ERR:
		/* Failed; the detailed result is in the metadata */
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;

	default:
		return -EIO;
	}
}
/*
 * tb_nvm_read_data() callback: programs the dword address (and, for a
 * partial block, the length) into the metadata register, runs the NVM
 * read operation and copies the data back from the data registers.
 */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	/* Length field is only needed when reading less than a full block */
	if (dwords < NVM_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}
/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}
  1589. static inline unsigned int
  1590. usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
  1591. {
  1592. /* Take the possible bandwidth limitation into account */
  1593. if (port->max_bw)
  1594. return min(bw, port->max_bw);
  1595. return bw;
  1596. }
/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* MSLR field encodes the maximum supported link rate */
	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}
  1618. /**
  1619. * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
  1620. * @port: USB3 adapter port
  1621. *
  1622. * Return actual established link rate of a USB3 adapter in Mb/s. If the
  1623. * link is not up returns %0 and negative errno in case of failure.
  1624. */
  1625. int usb4_usb3_port_actual_link_rate(struct tb_port *port)
  1626. {
  1627. int ret, lr;
  1628. u32 val;
  1629. if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
  1630. return -EINVAL;
  1631. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1632. port->cap_adap + ADP_USB3_CS_4, 1);
  1633. if (ret)
  1634. return ret;
  1635. if (!(val & ADP_USB3_CS_4_ULV))
  1636. return 0;
  1637. lr = val & ADP_USB3_CS_4_ALR_MASK;
  1638. ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
  1639. return usb4_usb3_port_max_bandwidth(port, ret);
  1640. }
  1641. static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
  1642. {
  1643. int ret;
  1644. u32 val;
  1645. if (!tb_port_is_usb3_down(port))
  1646. return -EINVAL;
  1647. if (tb_route(port->sw))
  1648. return -EINVAL;
  1649. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1650. port->cap_adap + ADP_USB3_CS_2, 1);
  1651. if (ret)
  1652. return ret;
  1653. if (request)
  1654. val |= ADP_USB3_CS_2_CMR;
  1655. else
  1656. val &= ~ADP_USB3_CS_2_CMR;
  1657. ret = tb_port_write(port, &val, TB_CFG_PORT,
  1658. port->cap_adap + ADP_USB3_CS_2, 1);
  1659. if (ret)
  1660. return ret;
  1661. /*
  1662. * We can use val here directly as the CMR bit is in the same place
  1663. * as HCA. Just mask out others.
  1664. */
  1665. val &= ADP_USB3_CS_2_CMR;
  1666. return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
  1667. ADP_USB3_CS_1_HCA, val, 1500);
  1668. }
/* Raise the CM request bit for @port and wait for the acknowledgment */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
/* Clear the CM request bit for @port and wait for the acknowledgment */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
  1677. static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
  1678. {
  1679. unsigned long uframes;
  1680. uframes = bw * 512UL << scale;
  1681. return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
  1682. }
  1683. static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
  1684. {
  1685. unsigned long uframes;
  1686. /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
  1687. uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
  1688. return DIV_ROUND_UP(uframes, 512UL << scale);
  1689. }
  1690. static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
  1691. int *upstream_bw,
  1692. int *downstream_bw)
  1693. {
  1694. u32 val, bw, scale;
  1695. int ret;
  1696. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1697. port->cap_adap + ADP_USB3_CS_2, 1);
  1698. if (ret)
  1699. return ret;
  1700. ret = tb_port_read(port, &scale, TB_CFG_PORT,
  1701. port->cap_adap + ADP_USB3_CS_3, 1);
  1702. if (ret)
  1703. return ret;
  1704. scale &= ADP_USB3_CS_3_SCALE_MASK;
  1705. bw = val & ADP_USB3_CS_2_AUBW_MASK;
  1706. *upstream_bw = usb3_bw_to_mbps(bw, scale);
  1707. bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
  1708. *downstream_bw = usb3_bw_to_mbps(bw, scale);
  1709. return 0;
  1710. }
  1711. /**
  1712. * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
  1713. * @port: USB3 adapter port
  1714. * @upstream_bw: Allocated upstream bandwidth is stored here
  1715. * @downstream_bw: Allocated downstream bandwidth is stored here
  1716. *
  1717. * Stores currently allocated USB3 bandwidth into @upstream_bw and
  1718. * @downstream_bw in Mb/s. Returns %0 in case of success and negative
  1719. * errno in failure.
  1720. */
  1721. int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
  1722. int *downstream_bw)
  1723. {
  1724. int ret;
  1725. ret = usb4_usb3_port_set_cm_request(port);
  1726. if (ret)
  1727. return ret;
  1728. ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
  1729. downstream_bw);
  1730. usb4_usb3_port_clear_cm_request(port);
  1731. return ret;
  1732. }
  1733. static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
  1734. int *upstream_bw,
  1735. int *downstream_bw)
  1736. {
  1737. u32 val, bw, scale;
  1738. int ret;
  1739. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1740. port->cap_adap + ADP_USB3_CS_1, 1);
  1741. if (ret)
  1742. return ret;
  1743. ret = tb_port_read(port, &scale, TB_CFG_PORT,
  1744. port->cap_adap + ADP_USB3_CS_3, 1);
  1745. if (ret)
  1746. return ret;
  1747. scale &= ADP_USB3_CS_3_SCALE_MASK;
  1748. bw = val & ADP_USB3_CS_1_CUBW_MASK;
  1749. *upstream_bw = usb3_bw_to_mbps(bw, scale);
  1750. bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
  1751. *downstream_bw = usb3_bw_to_mbps(bw, scale);
  1752. return 0;
  1753. }
  1754. static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
  1755. int upstream_bw,
  1756. int downstream_bw)
  1757. {
  1758. u32 val, ubw, dbw, scale;
  1759. int ret, max_bw;
  1760. /* Figure out suitable scale */
  1761. scale = 0;
  1762. max_bw = max(upstream_bw, downstream_bw);
  1763. while (scale < 64) {
  1764. if (mbps_to_usb3_bw(max_bw, scale) < 4096)
  1765. break;
  1766. scale++;
  1767. }
  1768. if (WARN_ON(scale >= 64))
  1769. return -EINVAL;
  1770. ret = tb_port_write(port, &scale, TB_CFG_PORT,
  1771. port->cap_adap + ADP_USB3_CS_3, 1);
  1772. if (ret)
  1773. return ret;
  1774. ubw = mbps_to_usb3_bw(upstream_bw, scale);
  1775. dbw = mbps_to_usb3_bw(downstream_bw, scale);
  1776. tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
  1777. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1778. port->cap_adap + ADP_USB3_CS_2, 1);
  1779. if (ret)
  1780. return ret;
  1781. val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
  1782. val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
  1783. val |= ubw;
  1784. return tb_port_write(port, &val, TB_CFG_PORT,
  1785. port->cap_adap + ADP_USB3_CS_2, 1);
  1786. }
  1787. /**
  1788. * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
  1789. * @port: USB3 adapter port
  1790. * @upstream_bw: New upstream bandwidth
  1791. * @downstream_bw: New downstream bandwidth
  1792. *
  1793. * This can be used to set how much bandwidth is allocated for the USB3
  1794. * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
  1795. * new values programmed to the USB3 adapter allocation registers. If
  1796. * the values are lower than what is currently consumed the allocation
  1797. * is set to what is currently consumed instead (consumed bandwidth
  1798. * cannot be taken away by CM). The actual new values are returned in
  1799. * @upstream_bw and @downstream_bw.
  1800. *
  1801. * Returns %0 in case of success and negative errno if there was a
  1802. * failure.
  1803. */
  1804. int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
  1805. int *downstream_bw)
  1806. {
  1807. int ret, consumed_up, consumed_down, allocate_up, allocate_down;
  1808. ret = usb4_usb3_port_set_cm_request(port);
  1809. if (ret)
  1810. return ret;
  1811. ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
  1812. &consumed_down);
  1813. if (ret)
  1814. goto err_request;
  1815. /* Don't allow it go lower than what is consumed */
  1816. allocate_up = max(*upstream_bw, consumed_up);
  1817. allocate_down = max(*downstream_bw, consumed_down);
  1818. ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
  1819. allocate_down);
  1820. if (ret)
  1821. goto err_request;
  1822. *upstream_bw = allocate_up;
  1823. *downstream_bw = allocate_down;
  1824. err_request:
  1825. usb4_usb3_port_clear_cm_request(port);
  1826. return ret;
  1827. }
  1828. /**
  1829. * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
  1830. * @port: USB3 adapter port
  1831. * @upstream_bw: New allocated upstream bandwidth
  1832. * @downstream_bw: New allocated downstream bandwidth
  1833. *
  1834. * Releases USB3 allocated bandwidth down to what is actually consumed.
  1835. * The new bandwidth is returned in @upstream_bw and @downstream_bw.
  1836. *
  1837. * Returns 0% in success and negative errno in case of failure.
  1838. */
  1839. int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
  1840. int *downstream_bw)
  1841. {
  1842. int ret, consumed_up, consumed_down;
  1843. ret = usb4_usb3_port_set_cm_request(port);
  1844. if (ret)
  1845. return ret;
  1846. ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
  1847. &consumed_down);
  1848. if (ret)
  1849. goto err_request;
  1850. /*
  1851. * Always keep 1000 Mb/s to make sure xHCI has at least some
  1852. * bandwidth available for isochronous traffic.
  1853. */
  1854. if (consumed_up < 1000)
  1855. consumed_up = 1000;
  1856. if (consumed_down < 1000)
  1857. consumed_down = 1000;
  1858. ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
  1859. consumed_down);
  1860. if (ret)
  1861. goto err_request;
  1862. *upstream_bw = consumed_up;
  1863. *downstream_bw = consumed_down;
  1864. err_request:
  1865. usb4_usb3_port_clear_cm_request(port);
  1866. return ret;
  1867. }