tb.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT	100 /* ms */

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}
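
/*
 * tb_remove_dp_resources() - Drop DP resources of a router and its children
 *
 * Removes all DP adapter resources belonging to @sw (children first)
 * from the connection manager's list of available DP resources.
 */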
static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			/*
			 * If a DP tunnel exists, change the host router's
			 * first depth level children TMU mode to HiFi for
			 * CL0s to work.
			 */
			if (tunnel)
				tb_switch_enable_tmu_1st_child(tb->root_switch,
						TB_SWITCH_TMU_RATE_HIFI);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}
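
/*
 * tb_discover_tunnels() - Find tunnels created by the boot firmware
 *
 * Marks routers along discovered PCIe tunnels as boot-configured and
 * takes runtime PM references on both ends of discovered DP tunnels so
 * the domain does not runtime suspend while they are active.
 */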
static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}
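
/*
 * tb_enable_tmu() - Enable TMU of a router in the requested mode
 *
 * If the TMU is already enabled in the correct mode this is a no-op.
 * Otherwise the TMU is disabled, the time is posted and the TMU is
 * re-enabled in the requested mode.
 */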
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}
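
/*
 * tb_find_tunnel() - Find an active tunnel of @type that matches either
 * the given source or destination port. Returns %NULL if no such tunnel
 * is currently tracked by the connection manager.
 */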
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}
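
/*
 * tb_find_first_usb3_tunnel() - Find the first hop USB3 tunnel
 *
 * Returns the USB3 tunnel that starts from the host router downstream
 * adapter leading towards the deeper of @src_port and @dst_port, or
 * %NULL if the branch has no USB3 tunnel.
 */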
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
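
/*
 * tb_available_bandwidth() - Available bandwidth for tunneling
 *
 * Walks the links between @src_port and @dst_port and returns the
 * smallest available bandwidth in both directions. A 10% guard band is
 * reserved on each link, and bandwidth already consumed by DP tunnels
 * crossing the link and by USB3 tunneled on the branch is subtracted.
 */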
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it actually
		 * crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
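
/*
 * tb_tunnel_usb3() - Set up a USB3 tunnel to a router
 *
 * Establishes a USB3 tunnel between the parent downstream adapter and
 * the USB3 upstream adapter of @sw, first releasing and afterwards
 * reclaiming unused USB3 bandwidth on the branch so the new tunnel gets
 * a share of what is currently available.
 */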
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}
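
/*
 * tb_create_usb3_tunnels() - Create USB3 tunnels for the whole topology
 *
 * Recursively walks the topology below @sw and tries to set up a USB3
 * tunnel for every router on the way. Used when the boot firmware did
 * not create the tunnels itself.
 */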
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;
	int ret;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment runtime PM is only supported for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);
	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery) {
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	} else {
		ret = tb_switch_enable_clx(sw, TB_CL1);
		if (ret && ret != -EOPNOTSUPP)
			tb_sw_warn(sw, "failed to enable %s on upstream port\n",
				   tb_switch_clx_name(TB_CL1));
	}

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
		 * To support highest CLx state, we set router's TMU to
		 * Normal-Uni mode.
		 */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}
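
/*
 * tb_tunnel_dp() - Try to establish a DP tunnel
 *
 * Finds an unused pair of DP IN and DP OUT adapters, runtime resumes
 * both ends, allocates the DP IN resource and sets up a DP tunnel using
 * the bandwidth currently available on the path. Does nothing if no
 * suitable pair exists.
 */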
static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out) {
			in = port;
			break;
		}
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return;
	}

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_dealloc_dp;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);

	/*
	 * If a DP tunnel exists, change the host router's first depth
	 * level children TMU mode to HiFi for CL0s to work.
	 */
	tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);

	return;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);
}
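
/*
 * tb_dp_resource_unavailable() - A DP adapter resource went away
 *
 * Tears down the DP tunnel using the adapter (if any), drops the port
 * from the DP resource list and then tries to set up another DP tunnel
 * with the remaining resources.
 */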
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}
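
/*
 * tb_tunnel_pci() - Set up a PCIe tunnel to a router
 *
 * Finds the PCIe upstream adapter of @sw and the matching downstream
 * adapter on the parent, then allocates and activates the tunnel. Also
 * enables PCIe L1 (needed for CL0s on Titan Ridge) and connects the
 * integrated xHCI where applicable.
 */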
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *parent_sw;
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	parent_sw = tb_to_switch(sw->dev.parent);
	port = tb_port_at(tb_route(sw), parent_sw);
	down = tb_find_pcie_down(parent_sw, port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}
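
/*
 * tb_approve_xdomain_paths() - Enable DMA paths towards an XDomain
 *
 * Sets up a DMA tunnel between the host NHI adapter and the port that
 * leads to the remote domain using the given transmit/receive paths and
 * rings.
 */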
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		mutex_unlock(&tb->lock);
		return -EIO;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
				false);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
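
/*
 * tb_restore_children() - Restore child router configuration after resume
 *
 * Re-enables CLx, TMU, lane bonding and link configuration for all
 * routers below @sw, and re-configures any XDomain connections.
 */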
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx re-enabling in case CLx is not supported.
	 */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));

	if (tb_switch_is_clx_enabled(sw, TB_CL1))
		/*
		 * To support highest CLx state, we set router's TMU to
		 * Normal-Uni mode.
		 */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
	else
		/* If CLx disabled, configure router's TMU to HiFi-Bidir mode */
		tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_lane_bonding_enable(port->remote->sw);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}

static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}

static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}

static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}

static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}

static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, create a device link back
	 * to NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}

struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);

	tb_dbg(tb, "using software connection manager\n");

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	return tb;
}