tunnel.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Thunderbolt driver - Tunneling support
  4. *
  5. * Copyright (c) 2014 Andreas Noever <[email protected]>
  6. * Copyright (C) 2019, Intel Corporation
  7. */
  8. #include <linux/delay.h>
  9. #include <linux/slab.h>
  10. #include <linux/list.h>
  11. #include "tunnel.h"
  12. #include "tb.h"
  13. /* PCIe adapters use always HopID of 8 for both directions */
  14. #define TB_PCI_HOPID 8
  15. #define TB_PCI_PATH_DOWN 0
  16. #define TB_PCI_PATH_UP 1
  17. /* USB3 adapters use always HopID of 8 for both directions */
  18. #define TB_USB3_HOPID 8
  19. #define TB_USB3_PATH_DOWN 0
  20. #define TB_USB3_PATH_UP 1
  21. /* DP adapters use HopID 8 for AUX and 9 for Video */
  22. #define TB_DP_AUX_TX_HOPID 8
  23. #define TB_DP_AUX_RX_HOPID 8
  24. #define TB_DP_VIDEO_HOPID 9
  25. #define TB_DP_VIDEO_PATH_OUT 0
  26. #define TB_DP_AUX_PATH_OUT 1
  27. #define TB_DP_AUX_PATH_IN 2
  28. /* Minimum number of credits needed for PCIe path */
  29. #define TB_MIN_PCIE_CREDITS 6U
  30. /*
  31. * Number of credits we try to allocate for each DMA path if not limited
  32. * by the host router baMaxHI.
  33. */
  34. #define TB_DMA_CREDITS 14U
  35. /* Minimum number of credits for DMA path */
  36. #define TB_MIN_DMA_CREDITS 1U
  37. static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
  38. #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
  39. do { \
  40. struct tb_tunnel *__tunnel = (tunnel); \
  41. level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
  42. tb_route(__tunnel->src_port->sw), \
  43. __tunnel->src_port->port, \
  44. tb_route(__tunnel->dst_port->sw), \
  45. __tunnel->dst_port->port, \
  46. tb_tunnel_names[__tunnel->type], \
  47. ## arg); \
  48. } while (0)
  49. #define tb_tunnel_WARN(tunnel, fmt, arg...) \
  50. __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
  51. #define tb_tunnel_warn(tunnel, fmt, arg...) \
  52. __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
  53. #define tb_tunnel_info(tunnel, fmt, arg...) \
  54. __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
  55. #define tb_tunnel_dbg(tunnel, fmt, arg...) \
  56. __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
  57. static inline unsigned int tb_usable_credits(const struct tb_port *port)
  58. {
  59. return port->total_credits - port->ctl_credits;
  60. }
  61. /**
  62. * tb_available_credits() - Available credits for PCIe and DMA
  63. * @port: Lane adapter to check
  64. * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
  65. * streams possible through this lane adapter
  66. */
  67. static unsigned int tb_available_credits(const struct tb_port *port,
  68. size_t *max_dp_streams)
  69. {
  70. const struct tb_switch *sw = port->sw;
  71. int credits, usb3, pcie, spare;
  72. size_t ndp;
  73. usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
  74. pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
  75. if (tb_acpi_is_xdomain_allowed()) {
  76. spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
  77. /* Add some credits for potential second DMA tunnel */
  78. spare += TB_MIN_DMA_CREDITS;
  79. } else {
  80. spare = 0;
  81. }
  82. credits = tb_usable_credits(port);
  83. if (tb_acpi_may_tunnel_dp()) {
  84. /*
  85. * Maximum number of DP streams possible through the
  86. * lane adapter.
  87. */
  88. if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
  89. ndp = (credits - (usb3 + pcie + spare)) /
  90. (sw->min_dp_aux_credits + sw->min_dp_main_credits);
  91. else
  92. ndp = 0;
  93. } else {
  94. ndp = 0;
  95. }
  96. credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
  97. credits -= usb3;
  98. if (max_dp_streams)
  99. *max_dp_streams = ndp;
  100. return credits > 0 ? credits : 0;
  101. }
  102. static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
  103. enum tb_tunnel_type type)
  104. {
  105. struct tb_tunnel *tunnel;
  106. tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
  107. if (!tunnel)
  108. return NULL;
  109. tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
  110. if (!tunnel->paths) {
  111. tb_tunnel_free(tunnel);
  112. return NULL;
  113. }
  114. INIT_LIST_HEAD(&tunnel->list);
  115. tunnel->tb = tb;
  116. tunnel->npaths = npaths;
  117. tunnel->type = type;
  118. return tunnel;
  119. }
  120. static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
  121. {
  122. int res;
  123. res = tb_pci_port_enable(tunnel->src_port, activate);
  124. if (res)
  125. return res;
  126. if (tb_port_is_pcie_up(tunnel->dst_port))
  127. return tb_pci_port_enable(tunnel->dst_port, activate);
  128. return 0;
  129. }
  130. static int tb_pci_init_credits(struct tb_path_hop *hop)
  131. {
  132. struct tb_port *port = hop->in_port;
  133. struct tb_switch *sw = port->sw;
  134. unsigned int credits;
  135. if (tb_port_use_credit_allocation(port)) {
  136. unsigned int available;
  137. available = tb_available_credits(port, NULL);
  138. credits = min(sw->max_pcie_credits, available);
  139. if (credits < TB_MIN_PCIE_CREDITS)
  140. return -ENOSPC;
  141. credits = max(TB_MIN_PCIE_CREDITS, credits);
  142. } else {
  143. if (tb_port_is_null(port))
  144. credits = port->bonded ? 32 : 16;
  145. else
  146. credits = 7;
  147. }
  148. hop->initial_credits = credits;
  149. return 0;
  150. }
  151. static int tb_pci_init_path(struct tb_path *path)
  152. {
  153. struct tb_path_hop *hop;
  154. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  155. path->egress_shared_buffer = TB_PATH_NONE;
  156. path->ingress_fc_enable = TB_PATH_ALL;
  157. path->ingress_shared_buffer = TB_PATH_NONE;
  158. path->priority = 3;
  159. path->weight = 1;
  160. path->drop_packages = 0;
  161. tb_path_for_each_hop(path, hop) {
  162. int ret;
  163. ret = tb_pci_init_credits(hop);
  164. if (ret)
  165. return ret;
  166. }
  167. return 0;
  168. }
/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	/* Inactive downstream adapter means nothing to discover */
	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	/* Walk back from the far end to pick up the downstream path */
	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	/*
	 * NOTE(review): src_port was assigned @down above, so this
	 * comparison looks always-false — presumably kept for symmetry
	 * with the DP discovery, which compares against the discovered
	 * return port. TODO confirm intent.
	 */
	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
  236. /**
  237. * tb_tunnel_alloc_pci() - allocate a pci tunnel
  238. * @tb: Pointer to the domain structure
  239. * @up: PCIe upstream adapter port
  240. * @down: PCIe downstream adapter port
  241. *
  242. * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
  243. * TB_TYPE_PCIE_DOWN.
  244. *
  245. * Return: Returns a tb_tunnel on success or NULL on failure.
  246. */
  247. struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
  248. struct tb_port *down)
  249. {
  250. struct tb_tunnel *tunnel;
  251. struct tb_path *path;
  252. tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
  253. if (!tunnel)
  254. return NULL;
  255. tunnel->activate = tb_pci_activate;
  256. tunnel->src_port = down;
  257. tunnel->dst_port = up;
  258. path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
  259. "PCIe Down");
  260. if (!path)
  261. goto err_free;
  262. tunnel->paths[TB_PCI_PATH_DOWN] = path;
  263. if (tb_pci_init_path(path))
  264. goto err_free;
  265. path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
  266. "PCIe Up");
  267. if (!path)
  268. goto err_free;
  269. tunnel->paths[TB_PCI_PATH_UP] = path;
  270. if (tb_pci_init_path(path))
  271. goto err_free;
  272. return tunnel;
  273. err_free:
  274. tb_tunnel_free(tunnel);
  275. return NULL;
  276. }
  277. static bool tb_dp_is_usb4(const struct tb_switch *sw)
  278. {
  279. /* Titan Ridge DP adapters need the same treatment as USB4 */
  280. return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
  281. }
  282. static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
  283. {
  284. int timeout = 10;
  285. u32 val;
  286. int ret;
  287. /* Both ends need to support this */
  288. if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
  289. return 0;
  290. ret = tb_port_read(out, &val, TB_CFG_PORT,
  291. out->cap_adap + DP_STATUS_CTRL, 1);
  292. if (ret)
  293. return ret;
  294. val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
  295. ret = tb_port_write(out, &val, TB_CFG_PORT,
  296. out->cap_adap + DP_STATUS_CTRL, 1);
  297. if (ret)
  298. return ret;
  299. do {
  300. ret = tb_port_read(out, &val, TB_CFG_PORT,
  301. out->cap_adap + DP_STATUS_CTRL, 1);
  302. if (ret)
  303. return ret;
  304. if (!(val & DP_STATUS_CTRL_CMHS))
  305. return 0;
  306. usleep_range(10, 100);
  307. } while (timeout--);
  308. return -ETIMEDOUT;
  309. }
  310. static inline u32 tb_dp_cap_get_rate(u32 val)
  311. {
  312. u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
  313. switch (rate) {
  314. case DP_COMMON_CAP_RATE_RBR:
  315. return 1620;
  316. case DP_COMMON_CAP_RATE_HBR:
  317. return 2700;
  318. case DP_COMMON_CAP_RATE_HBR2:
  319. return 5400;
  320. case DP_COMMON_CAP_RATE_HBR3:
  321. return 8100;
  322. default:
  323. return 0;
  324. }
  325. }
  326. static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
  327. {
  328. val &= ~DP_COMMON_CAP_RATE_MASK;
  329. switch (rate) {
  330. default:
  331. WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
  332. fallthrough;
  333. case 1620:
  334. val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
  335. break;
  336. case 2700:
  337. val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
  338. break;
  339. case 5400:
  340. val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
  341. break;
  342. case 8100:
  343. val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
  344. break;
  345. }
  346. return val;
  347. }
  348. static inline u32 tb_dp_cap_get_lanes(u32 val)
  349. {
  350. u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
  351. switch (lanes) {
  352. case DP_COMMON_CAP_1_LANE:
  353. return 1;
  354. case DP_COMMON_CAP_2_LANES:
  355. return 2;
  356. case DP_COMMON_CAP_4_LANES:
  357. return 4;
  358. default:
  359. return 0;
  360. }
  361. }
  362. static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
  363. {
  364. val &= ~DP_COMMON_CAP_LANES_MASK;
  365. switch (lanes) {
  366. default:
  367. WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
  368. lanes);
  369. fallthrough;
  370. case 1:
  371. val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
  372. break;
  373. case 2:
  374. val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
  375. break;
  376. case 4:
  377. val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
  378. break;
  379. }
  380. return val;
  381. }
  382. static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
  383. {
  384. /* Tunneling removes the DP 8b/10b encoding */
  385. return rate * lanes * 8 / 10;
  386. }
  387. static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
  388. u32 out_rate, u32 out_lanes, u32 *new_rate,
  389. u32 *new_lanes)
  390. {
  391. static const u32 dp_bw[][2] = {
  392. /* Mb/s, lanes */
  393. { 8100, 4 }, /* 25920 Mb/s */
  394. { 5400, 4 }, /* 17280 Mb/s */
  395. { 8100, 2 }, /* 12960 Mb/s */
  396. { 2700, 4 }, /* 8640 Mb/s */
  397. { 5400, 2 }, /* 8640 Mb/s */
  398. { 8100, 1 }, /* 6480 Mb/s */
  399. { 1620, 4 }, /* 5184 Mb/s */
  400. { 5400, 1 }, /* 4320 Mb/s */
  401. { 2700, 2 }, /* 4320 Mb/s */
  402. { 1620, 2 }, /* 2592 Mb/s */
  403. { 2700, 1 }, /* 2160 Mb/s */
  404. { 1620, 1 }, /* 1296 Mb/s */
  405. };
  406. unsigned int i;
  407. /*
  408. * Find a combination that can fit into max_bw and does not
  409. * exceed the maximum rate and lanes supported by the DP OUT and
  410. * DP IN adapters.
  411. */
  412. for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
  413. if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
  414. continue;
  415. if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
  416. continue;
  417. if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
  418. *new_rate = dp_bw[i][0];
  419. *new_lanes = dp_bw[i][1];
  420. return 0;
  421. }
  422. }
  423. return -ENOSR;
  424. }
/*
 * Exchange DP capabilities between the tunnel ends: each adapter's
 * DP_LOCAL_CAP is copied into the peer's DP_REMOTE_CAP, reducing the
 * advertised rate/lanes first if the tunnel bandwidth is limited.
 */
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	/* The applicable limit depends on which end is deeper in the topology */
	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_port_dbg(out, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}
/*
 * Activate/deactivate a DP tunnel: program (or clear) the video and AUX
 * HopIDs on both adapters, then enable (or disable) the adapters.
 */
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		/*
		 * Index of the final hop, taken from the video path.
		 * NOTE(review): @last is also used to index the AUX
		 * paths below — assumes all three paths have the same
		 * length; TODO confirm.
		 */
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		/* Source adapter: video out, AUX out, AUX in hops */
		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		/* Destination adapter: mirrored hop assignments */
		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		/* On teardown clear hot-plug detect and zero all HopIDs */
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}
  535. static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
  536. int *consumed_down)
  537. {
  538. struct tb_port *in = tunnel->src_port;
  539. const struct tb_switch *sw = in->sw;
  540. u32 val, rate = 0, lanes = 0;
  541. int ret;
  542. if (tb_dp_is_usb4(sw)) {
  543. int timeout = 20;
  544. /*
  545. * Wait for DPRX done. Normally it should be already set
  546. * for active tunnel.
  547. */
  548. do {
  549. ret = tb_port_read(in, &val, TB_CFG_PORT,
  550. in->cap_adap + DP_COMMON_CAP, 1);
  551. if (ret)
  552. return ret;
  553. if (val & DP_COMMON_CAP_DPRX_DONE) {
  554. rate = tb_dp_cap_get_rate(val);
  555. lanes = tb_dp_cap_get_lanes(val);
  556. break;
  557. }
  558. msleep(250);
  559. } while (timeout--);
  560. if (!timeout)
  561. return -ETIMEDOUT;
  562. } else if (sw->generation >= 2) {
  563. /*
  564. * Read from the copied remote cap so that we take into
  565. * account if capabilities were reduced during exchange.
  566. */
  567. ret = tb_port_read(in, &val, TB_CFG_PORT,
  568. in->cap_adap + DP_REMOTE_CAP, 1);
  569. if (ret)
  570. return ret;
  571. rate = tb_dp_cap_get_rate(val);
  572. lanes = tb_dp_cap_get_lanes(val);
  573. } else {
  574. /* No bandwidth management for legacy devices */
  575. *consumed_up = 0;
  576. *consumed_down = 0;
  577. return 0;
  578. }
  579. if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
  580. *consumed_up = 0;
  581. *consumed_down = tb_dp_bandwidth(rate, lanes);
  582. } else {
  583. *consumed_up = tb_dp_bandwidth(rate, lanes);
  584. *consumed_down = 0;
  585. }
  586. return 0;
  587. }
  588. static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
  589. {
  590. struct tb_port *port = hop->in_port;
  591. struct tb_switch *sw = port->sw;
  592. if (tb_port_use_credit_allocation(port))
  593. hop->initial_credits = sw->min_dp_aux_credits;
  594. else
  595. hop->initial_credits = 1;
  596. }
  597. static void tb_dp_init_aux_path(struct tb_path *path)
  598. {
  599. struct tb_path_hop *hop;
  600. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  601. path->egress_shared_buffer = TB_PATH_NONE;
  602. path->ingress_fc_enable = TB_PATH_ALL;
  603. path->ingress_shared_buffer = TB_PATH_NONE;
  604. path->priority = 2;
  605. path->weight = 1;
  606. tb_path_for_each_hop(path, hop)
  607. tb_dp_init_aux_credits(hop);
  608. }
  609. static int tb_dp_init_video_credits(struct tb_path_hop *hop)
  610. {
  611. struct tb_port *port = hop->in_port;
  612. struct tb_switch *sw = port->sw;
  613. if (tb_port_use_credit_allocation(port)) {
  614. unsigned int nfc_credits;
  615. size_t max_dp_streams;
  616. tb_available_credits(port, &max_dp_streams);
  617. /*
  618. * Read the number of currently allocated NFC credits
  619. * from the lane adapter. Since we only use them for DP
  620. * tunneling we can use that to figure out how many DP
  621. * tunnels already go through the lane adapter.
  622. */
  623. nfc_credits = port->config.nfc_credits &
  624. ADP_CS_4_NFC_BUFFERS_MASK;
  625. if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
  626. return -ENOSPC;
  627. hop->nfc_credits = sw->min_dp_main_credits;
  628. } else {
  629. hop->nfc_credits = min(port->total_credits - 2, 12U);
  630. }
  631. return 0;
  632. }
  633. static int tb_dp_init_video_path(struct tb_path *path)
  634. {
  635. struct tb_path_hop *hop;
  636. path->egress_fc_enable = TB_PATH_NONE;
  637. path->egress_shared_buffer = TB_PATH_NONE;
  638. path->ingress_fc_enable = TB_PATH_NONE;
  639. path->ingress_shared_buffer = TB_PATH_NONE;
  640. path->priority = 1;
  641. path->weight = 1;
  642. tb_path_for_each_hop(path, hop) {
  643. int ret;
  644. ret = tb_dp_init_video_credits(hop);
  645. if (ret)
  646. return ret;
  647. }
  648. return 0;
  649. }
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	/* Inactive DP IN adapter means nothing to discover */
	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	/* The video path determines where the tunnel ends (dst_port) */
	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	/* AUX RX is walked backwards; @port records where it ended up */
	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	/* The AUX RX path must end back at the DP IN adapter we started from */
	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
  720. /**
  721. * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
  722. * @tb: Pointer to the domain structure
  723. * @in: DP in adapter port
  724. * @out: DP out adapter port
  725. * @link_nr: Preferred lane adapter when the link is not bonded
  726. * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
  727. * if not limited)
  728. * @max_down: Maximum available downstream bandwidth for the DP tunnel
  729. * (%0 if not limited)
  730. *
  731. * Allocates a tunnel between @in and @out that is capable of tunneling
  732. * Display Port traffic.
  733. *
  734. * Return: Returns a tb_tunnel on success or NULL on failure.
  735. */
  736. struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
  737. struct tb_port *out, int link_nr,
  738. int max_up, int max_down)
  739. {
  740. struct tb_tunnel *tunnel;
  741. struct tb_path **paths;
  742. struct tb_path *path;
  743. if (WARN_ON(!in->cap_adap || !out->cap_adap))
  744. return NULL;
  745. tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
  746. if (!tunnel)
  747. return NULL;
  748. tunnel->init = tb_dp_xchg_caps;
  749. tunnel->activate = tb_dp_activate;
  750. tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
  751. tunnel->src_port = in;
  752. tunnel->dst_port = out;
  753. tunnel->max_up = max_up;
  754. tunnel->max_down = max_down;
  755. paths = tunnel->paths;
  756. path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
  757. link_nr, "Video");
  758. if (!path)
  759. goto err_free;
  760. tb_dp_init_video_path(path);
  761. paths[TB_DP_VIDEO_PATH_OUT] = path;
  762. path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
  763. TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
  764. if (!path)
  765. goto err_free;
  766. tb_dp_init_aux_path(path);
  767. paths[TB_DP_AUX_PATH_OUT] = path;
  768. path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
  769. TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
  770. if (!path)
  771. goto err_free;
  772. tb_dp_init_aux_path(path);
  773. paths[TB_DP_AUX_PATH_IN] = path;
  774. return tunnel;
  775. err_free:
  776. tb_tunnel_free(tunnel);
  777. return NULL;
  778. }
  779. static unsigned int tb_dma_available_credits(const struct tb_port *port)
  780. {
  781. const struct tb_switch *sw = port->sw;
  782. int credits;
  783. credits = tb_available_credits(port, NULL);
  784. if (tb_acpi_may_tunnel_pcie())
  785. credits -= sw->max_pcie_credits;
  786. credits -= port->dma_credits;
  787. return credits > 0 ? credits : 0;
  788. }
  789. static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
  790. {
  791. struct tb_port *port = hop->in_port;
  792. if (tb_port_use_credit_allocation(port)) {
  793. unsigned int available = tb_dma_available_credits(port);
  794. /*
  795. * Need to have at least TB_MIN_DMA_CREDITS, otherwise
  796. * DMA path cannot be established.
  797. */
  798. if (available < TB_MIN_DMA_CREDITS)
  799. return -ENOSPC;
  800. while (credits > available)
  801. credits--;
  802. tb_port_dbg(port, "reserving %u credits for DMA path\n",
  803. credits);
  804. port->dma_credits += credits;
  805. } else {
  806. if (tb_port_is_null(port))
  807. credits = port->bonded ? 14 : 6;
  808. else
  809. credits = min(port->total_credits, credits);
  810. }
  811. hop->initial_credits = credits;
  812. return 0;
  813. }
/*
 * Path from lane adapter to NHI. Sets up flow control for the RX
 * direction and reserves buffer credits for every hop. Returns %0 on
 * success or negative errno if credits could not be reserved.
 */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so can use all
	 * the credits (except the ones reserved for control traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	/* Account the grab directly; no tb_dma_reserve_credits() here */
	hop->in_port->dma_credits += tmp;

	/* Remaining hops go through the normal reservation accounting */
	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}
  843. /* Path from NHI to lane adapter */
  844. static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
  845. {
  846. struct tb_path_hop *hop;
  847. path->egress_fc_enable = TB_PATH_ALL;
  848. path->ingress_fc_enable = TB_PATH_ALL;
  849. path->egress_shared_buffer = TB_PATH_NONE;
  850. path->ingress_shared_buffer = TB_PATH_NONE;
  851. path->priority = 5;
  852. path->weight = 1;
  853. path->clear_fc = true;
  854. tb_path_for_each_hop(path, hop) {
  855. int ret;
  856. ret = tb_dma_reserve_credits(hop, credits);
  857. if (ret)
  858. return ret;
  859. }
  860. return 0;
  861. }
  862. static void tb_dma_release_credits(struct tb_path_hop *hop)
  863. {
  864. struct tb_port *port = hop->in_port;
  865. if (tb_port_use_credit_allocation(port)) {
  866. port->dma_credits -= hop->initial_credits;
  867. tb_port_dbg(port, "released %u DMA path credits\n",
  868. hop->initial_credits);
  869. }
  870. }
  871. static void tb_dma_deinit_path(struct tb_path *path)
  872. {
  873. struct tb_path_hop *hop;
  874. tb_path_for_each_hop(path, hop)
  875. tb_dma_release_credits(hop);
  876. }
  877. static void tb_dma_deinit(struct tb_tunnel *tunnel)
  878. {
  879. int i;
  880. for (i = 0; i < tunnel->npaths; i++) {
  881. if (!tunnel->paths[i])
  882. continue;
  883. tb_dma_deinit_path(tunnel->paths[i]);
  884. }
  885. }
/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	/* A positive ring number means that direction is requested */
	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	/* At least one direction must be requested */
	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	/* Credits reserved below are released through this hook */
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		/* RX path runs from the remote end (@dst) to the NHI */
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		/* TX path runs from the NHI to the remote end (@dst) */
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	/* tb_tunnel_free() runs tb_dma_deinit() for partially set up paths */
	tb_tunnel_free(tunnel);
	return NULL;
}
/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	/* %0 is neither a valid value nor "ignore" (%-1), so never matches */
	if (!receive_ring || !transmit_ring)
		return false;

	/*
	 * Classify the tunnel's paths by direction: a path whose first
	 * hop enters at the NHI is TX, one whose last hop exits at the
	 * NHI is RX.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	/* TX: ring is the first hop (on the NHI), path HopID the last hop */
	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	/* RX: path HopID is the first hop, ring the last hop (on the NHI) */
	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
  1001. static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
  1002. {
  1003. int ret, up_max_rate, down_max_rate;
  1004. ret = usb4_usb3_port_max_link_rate(up);
  1005. if (ret < 0)
  1006. return ret;
  1007. up_max_rate = ret;
  1008. ret = usb4_usb3_port_max_link_rate(down);
  1009. if (ret < 0)
  1010. return ret;
  1011. down_max_rate = ret;
  1012. return min(up_max_rate, down_max_rate);
  1013. }
  1014. static int tb_usb3_init(struct tb_tunnel *tunnel)
  1015. {
  1016. tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
  1017. tunnel->allocated_up, tunnel->allocated_down);
  1018. return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
  1019. &tunnel->allocated_up,
  1020. &tunnel->allocated_down);
  1021. }
  1022. static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
  1023. {
  1024. int res;
  1025. res = tb_usb3_port_enable(tunnel->src_port, activate);
  1026. if (res)
  1027. return res;
  1028. if (tb_port_is_usb3_up(tunnel->dst_port))
  1029. return tb_usb3_port_enable(tunnel->dst_port, activate);
  1030. return 0;
  1031. }
  1032. static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
  1033. int *consumed_up, int *consumed_down)
  1034. {
  1035. int pcie_enabled = tb_acpi_may_tunnel_pcie();
  1036. /*
  1037. * PCIe tunneling, if enabled, affects the USB3 bandwidth so
  1038. * take that it into account here.
  1039. */
  1040. *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
  1041. *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
  1042. return 0;
  1043. }
  1044. static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
  1045. {
  1046. int ret;
  1047. ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
  1048. &tunnel->allocated_up,
  1049. &tunnel->allocated_down);
  1050. if (ret)
  1051. return ret;
  1052. tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
  1053. tunnel->allocated_up, tunnel->allocated_down);
  1054. return 0;
  1055. }
/*
 * Grow this tunnel's bandwidth allocation from the available pool, up
 * to 90% of the link rate and never below what is already allocated.
 * The new allocation (not just the delta) is subtracted from
 * @available_up/@available_down. Failures are logged and the pool is
 * left untouched.
 */
static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use maximum link rate if the link valid is not set */
		ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}
  1106. static void tb_usb3_init_credits(struct tb_path_hop *hop)
  1107. {
  1108. struct tb_port *port = hop->in_port;
  1109. struct tb_switch *sw = port->sw;
  1110. unsigned int credits;
  1111. if (tb_port_use_credit_allocation(port)) {
  1112. credits = sw->max_usb3_credits;
  1113. } else {
  1114. if (tb_port_is_null(port))
  1115. credits = port->bonded ? 32 : 16;
  1116. else
  1117. credits = 7;
  1118. }
  1119. hop->initial_credits = credits;
  1120. }
  1121. static void tb_usb3_init_path(struct tb_path *path)
  1122. {
  1123. struct tb_path_hop *hop;
  1124. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  1125. path->egress_shared_buffer = TB_PATH_NONE;
  1126. path->ingress_fc_enable = TB_PATH_ALL;
  1127. path->ingress_shared_buffer = TB_PATH_NONE;
  1128. path->priority = 3;
  1129. path->weight = 3;
  1130. path->drop_packages = 0;
  1131. tb_path_for_each_hop(path, hop)
  1132. tb_usb3_init_credits(hop);
  1133. }
/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	/* Nothing to discover if the downstream adapter is not enabled */
	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	/* Follow the return path back from the discovered far end */
	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on an USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	/* Bandwidth management hooks are only set for the first hop tunnel */
	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
  1218. /**
  1219. * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
  1220. * @tb: Pointer to the domain structure
  1221. * @up: USB3 upstream adapter port
  1222. * @down: USB3 downstream adapter port
  1223. * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
  1224. * if not limited).
  1225. * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
  1226. * (%0 if not limited).
  1227. *
  1228. * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
  1229. * @TB_TYPE_USB3_DOWN.
  1230. *
  1231. * Return: Returns a tb_tunnel on success or %NULL on failure.
  1232. */
  1233. struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
  1234. struct tb_port *down, int max_up,
  1235. int max_down)
  1236. {
  1237. struct tb_tunnel *tunnel;
  1238. struct tb_path *path;
  1239. int max_rate = 0;
  1240. /*
  1241. * Check that we have enough bandwidth available for the new
  1242. * USB3 tunnel.
  1243. */
  1244. if (max_up > 0 || max_down > 0) {
  1245. max_rate = tb_usb3_max_link_rate(down, up);
  1246. if (max_rate < 0)
  1247. return NULL;
  1248. /* Only 90% can be allocated for USB3 isochronous transfers */
  1249. max_rate = max_rate * 90 / 100;
  1250. tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
  1251. max_rate);
  1252. if (max_rate > max_up || max_rate > max_down) {
  1253. tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
  1254. return NULL;
  1255. }
  1256. }
  1257. tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
  1258. if (!tunnel)
  1259. return NULL;
  1260. tunnel->activate = tb_usb3_activate;
  1261. tunnel->src_port = down;
  1262. tunnel->dst_port = up;
  1263. tunnel->max_up = max_up;
  1264. tunnel->max_down = max_down;
  1265. path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
  1266. "USB3 Down");
  1267. if (!path) {
  1268. tb_tunnel_free(tunnel);
  1269. return NULL;
  1270. }
  1271. tb_usb3_init_path(path);
  1272. tunnel->paths[TB_USB3_PATH_DOWN] = path;
  1273. path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
  1274. "USB3 Up");
  1275. if (!path) {
  1276. tb_tunnel_free(tunnel);
  1277. return NULL;
  1278. }
  1279. tb_usb3_init_path(path);
  1280. tunnel->paths[TB_USB3_PATH_UP] = path;
  1281. if (!tb_route(down->sw)) {
  1282. tunnel->allocated_up = max_rate;
  1283. tunnel->allocated_down = max_rate;
  1284. tunnel->init = tb_usb3_init;
  1285. tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
  1286. tunnel->release_unused_bandwidth =
  1287. tb_usb3_release_unused_bandwidth;
  1288. tunnel->reclaim_available_bandwidth =
  1289. tb_usb3_reclaim_available_bandwidth;
  1290. }
  1291. return tunnel;
  1292. }
  1293. /**
  1294. * tb_tunnel_free() - free a tunnel
  1295. * @tunnel: Tunnel to be freed
  1296. *
  1297. * Frees a tunnel. The tunnel does not need to be deactivated.
  1298. */
  1299. void tb_tunnel_free(struct tb_tunnel *tunnel)
  1300. {
  1301. int i;
  1302. if (!tunnel)
  1303. return;
  1304. if (tunnel->deinit)
  1305. tunnel->deinit(tunnel);
  1306. for (i = 0; i < tunnel->npaths; i++) {
  1307. if (tunnel->paths[i])
  1308. tb_path_free(tunnel->paths[i]);
  1309. }
  1310. kfree(tunnel->paths);
  1311. kfree(tunnel);
  1312. }
  1313. /**
  1314. * tb_tunnel_is_invalid - check whether an activated path is still valid
  1315. * @tunnel: Tunnel to check
  1316. */
  1317. bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
  1318. {
  1319. int i;
  1320. for (i = 0; i < tunnel->npaths; i++) {
  1321. WARN_ON(!tunnel->paths[i]->activated);
  1322. if (tb_path_is_invalid(tunnel->paths[i]))
  1323. return true;
  1324. }
  1325. return false;
  1326. }
/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case if failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	/* Run the per-type setup hook before activating any path */
	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	/* Activate every path; on failure tear the whole tunnel down */
	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	/* Finally run the per-type activation hook (adapter enabling) */
	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}
  1368. /**
  1369. * tb_tunnel_activate() - activate a tunnel
  1370. * @tunnel: Tunnel to activate
  1371. *
  1372. * Return: Returns 0 on success or an error code on failure.
  1373. */
  1374. int tb_tunnel_activate(struct tb_tunnel *tunnel)
  1375. {
  1376. int i;
  1377. for (i = 0; i < tunnel->npaths; i++) {
  1378. if (tunnel->paths[i]->activated) {
  1379. tb_tunnel_WARN(tunnel,
  1380. "trying to activate an already activated tunnel\n");
  1381. return -EINVAL;
  1382. }
  1383. }
  1384. return tb_tunnel_restart(tunnel);
  1385. }
  1386. /**
  1387. * tb_tunnel_deactivate() - deactivate a tunnel
  1388. * @tunnel: Tunnel to deactivate
  1389. */
  1390. void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
  1391. {
  1392. int i;
  1393. tb_tunnel_dbg(tunnel, "deactivating\n");
  1394. if (tunnel->activate)
  1395. tunnel->activate(tunnel, false);
  1396. for (i = 0; i < tunnel->npaths; i++) {
  1397. if (tunnel->paths[i] && tunnel->paths[i]->activated)
  1398. tb_path_deactivate(tunnel->paths[i]);
  1399. }
  1400. }
  1401. /**
  1402. * tb_tunnel_port_on_path() - Does the tunnel go through port
  1403. * @tunnel: Tunnel to check
  1404. * @port: Port to check
  1405. *
  1406. * Returns true if @tunnel goes through @port (direction does not matter),
  1407. * false otherwise.
  1408. */
  1409. bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
  1410. const struct tb_port *port)
  1411. {
  1412. int i;
  1413. for (i = 0; i < tunnel->npaths; i++) {
  1414. if (!tunnel->paths[i])
  1415. continue;
  1416. if (tb_path_port_on_path(tunnel->paths[i], port))
  1417. return true;
  1418. }
  1419. return false;
  1420. }
  1421. static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
  1422. {
  1423. int i;
  1424. for (i = 0; i < tunnel->npaths; i++) {
  1425. if (!tunnel->paths[i])
  1426. return false;
  1427. if (!tunnel->paths[i]->activated)
  1428. return false;
  1429. }
  1430. return true;
  1431. }
  1432. /**
  1433. * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
  1434. * @tunnel: Tunnel to check
  1435. * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
  1436. * Can be %NULL.
  1437. * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
  1438. * Can be %NULL.
  1439. *
  1440. * Stores the amount of isochronous bandwidth @tunnel consumes in
  1441. * @consumed_up and @consumed_down. In case of success returns %0,
  1442. * negative errno otherwise.
  1443. */
  1444. int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
  1445. int *consumed_down)
  1446. {
  1447. int up_bw = 0, down_bw = 0;
  1448. if (!tb_tunnel_is_active(tunnel))
  1449. goto out;
  1450. if (tunnel->consumed_bandwidth) {
  1451. int ret;
  1452. ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
  1453. if (ret)
  1454. return ret;
  1455. tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
  1456. down_bw);
  1457. }
  1458. out:
  1459. if (consumed_up)
  1460. *consumed_up = up_bw;
  1461. if (consumed_down)
  1462. *consumed_down = down_bw;
  1463. return 0;
  1464. }
  1465. /**
  1466. * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
  1467. * @tunnel: Tunnel whose unused bandwidth to release
  1468. *
  1469. * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
  1470. * moment) this function makes it to release all the unused bandwidth.
  1471. *
  1472. * Returns %0 in case of success and negative errno otherwise.
  1473. */
  1474. int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
  1475. {
  1476. if (!tb_tunnel_is_active(tunnel))
  1477. return 0;
  1478. if (tunnel->release_unused_bandwidth) {
  1479. int ret;
  1480. ret = tunnel->release_unused_bandwidth(tunnel);
  1481. if (ret)
  1482. return ret;
  1483. }
  1484. return 0;
  1485. }
  1486. /**
  1487. * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
  1488. * @tunnel: Tunnel reclaiming available bandwidth
  1489. * @available_up: Available upstream bandwidth (in Mb/s)
  1490. * @available_down: Available downstream bandwidth (in Mb/s)
  1491. *
  1492. * Reclaims bandwidth from @available_up and @available_down and updates
  1493. * the variables accordingly (e.g decreases both according to what was
  1494. * reclaimed by the tunnel). If nothing was reclaimed the values are
  1495. * kept as is.
  1496. */
  1497. void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
  1498. int *available_up,
  1499. int *available_down)
  1500. {
  1501. if (!tb_tunnel_is_active(tunnel))
  1502. return;
  1503. if (tunnel->reclaim_available_bandwidth)
  1504. tunnel->reclaim_available_bandwidth(tunnel, available_up,
  1505. available_down);
  1506. }