// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <[email protected]>
 *	    Rajmohan Mani <[email protected]>
 */

#include <linux/delay.h>

#include "tb.h"
/*
 * Program the TMU measurement window and averaging constants of @sw to
 * match the requested @rate. Rates other than normal/HiFi (e.g. off)
 * need no parameter programming and return 0 without touching hardware.
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_rate rate)
{
	/* Index 0 = normal rate values, index 1 = HiFi rate values */
	u32 freq_meas_wind[2] = { 30, 800 };
	u32 avg_const[2] = { 4, 8 };
	u32 freq, avg, val;
	int ret;

	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
		freq = freq_meas_wind[0];
		avg = avg_const[0];
	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
		freq = freq_meas_wind[1];
		avg = avg_const[1];
	} else {
		/* No parameters to program for this rate */
		return 0;
	}

	/* Update the frequency measurement window in TMU_RTR_CS_0 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	/* All four averaging constants in TMU_RTR_CS_15 get the same value */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_15, 1);
}
  52. static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
  53. {
  54. bool root_switch = !tb_route(sw);
  55. switch (sw->tmu.rate) {
  56. case TB_SWITCH_TMU_RATE_OFF:
  57. return "off";
  58. case TB_SWITCH_TMU_RATE_HIFI:
  59. /* Root switch does not have upstream directionality */
  60. if (root_switch)
  61. return "HiFi";
  62. if (sw->tmu.unidirectional)
  63. return "uni-directional, HiFi";
  64. return "bi-directional, HiFi";
  65. case TB_SWITCH_TMU_RATE_NORMAL:
  66. if (root_switch)
  67. return "normal";
  68. return "uni-directional, normal";
  69. default:
  70. return "unknown";
  71. }
  72. }
  73. static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
  74. {
  75. int ret;
  76. u32 val;
  77. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  78. sw->tmu.cap + TMU_RTR_CS_0, 1);
  79. if (ret)
  80. return false;
  81. return !!(val & TMU_RTR_CS_0_UCAP);
  82. }
  83. static int tb_switch_tmu_rate_read(struct tb_switch *sw)
  84. {
  85. int ret;
  86. u32 val;
  87. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  88. sw->tmu.cap + TMU_RTR_CS_3, 1);
  89. if (ret)
  90. return ret;
  91. val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
  92. return val;
  93. }
  94. static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
  95. {
  96. int ret;
  97. u32 val;
  98. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  99. sw->tmu.cap + TMU_RTR_CS_3, 1);
  100. if (ret)
  101. return ret;
  102. val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
  103. val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
  104. return tb_sw_write(sw, &val, TB_CFG_SWITCH,
  105. sw->tmu.cap + TMU_RTR_CS_3, 1);
  106. }
  107. static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
  108. u32 value)
  109. {
  110. u32 data;
  111. int ret;
  112. ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
  113. if (ret)
  114. return ret;
  115. data &= ~mask;
  116. data |= value;
  117. return tb_port_write(port, &data, TB_CFG_PORT,
  118. port->cap_tmu + offset, 1);
  119. }
  120. static int tb_port_tmu_set_unidirectional(struct tb_port *port,
  121. bool unidirectional)
  122. {
  123. u32 val;
  124. if (!port->sw->tmu.has_ucap)
  125. return 0;
  126. val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
  127. return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
  128. }
/* Clear the UDM bit of @port (switch to bi-directional time sync). */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
/* Set the UDM bit of @port (switch to uni-directional time sync). */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
  137. static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
  138. {
  139. int ret;
  140. u32 val;
  141. ret = tb_port_read(port, &val, TB_CFG_PORT,
  142. port->cap_tmu + TMU_ADP_CS_3, 1);
  143. if (ret)
  144. return false;
  145. return val & TMU_ADP_CS_3_UDM;
  146. }
/*
 * Set or clear the DTS (Disable Time Sync) bit of @port.
 *
 * NOTE: the parameter sense is inverted relative to its name — passing
 * @time_sync == true SETS the DTS bit (disabling time sync), which is
 * why the _disable() wrapper below passes true and _enable() passes
 * false.
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
/* Disable time sync on @port by setting the DTS bit (true sets DTS). */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
/* Enable time sync on @port by clearing the DTS bit (false clears DTS). */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
  160. static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
  161. {
  162. u32 val, offset, bit;
  163. int ret;
  164. if (tb_switch_is_usb4(sw)) {
  165. offset = sw->tmu.cap + TMU_RTR_CS_0;
  166. bit = TMU_RTR_CS_0_TD;
  167. } else {
  168. offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
  169. bit = TB_TIME_VSEC_3_CS_26_TD;
  170. }
  171. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
  172. if (ret)
  173. return ret;
  174. if (set)
  175. val |= bit;
  176. else
  177. val &= ~bit;
  178. return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
  179. }
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to be initialized
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* With firmware connection manager the TMU is not ours to manage */
	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache the per-adapter TMU capability offsets */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	/* Cache the current rate so enable/disable can compare against it */
	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		/* Only non-root routers have an upstream adapter to query */
		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure: the
 * grandmaster (root switch) time is read and written into the post
 * time registers of @sw, then the router is asked to converge to it.
 * Returns %0 on success (or when nothing needs to be done) and
 * negative errno otherwise.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* Only device routers (with a route) get their time posted */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	/* Grandmaster local time is three consecutive 32-bit words */
	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the router clears the post time registers */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Time disruption is always cleared, even on the error paths */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/*
	 * No need to disable TMU on devices that don't support CLx since
	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
	 * HiFi bi-directional is enabled by default and we don't change it.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;

	if (tb_route(sw)) {
		bool unidirectional = sw->tmu.unidirectional;
		struct tb_switch *parent = tb_switch_parent(sw);
		struct tb_port *down, *up;
		int ret;

		down = tb_port_at(tb_route(sw), parent);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change it's TMU
		 * mode.
		 */
		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
		if (ret)
			return ret;

		/* Upstream port result ignored: the switch may be unplugged */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		/* Root switch: only the rate register needs to be turned off */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
/*
 * Roll the TMU configuration of @sw (and, in the uni-directional case,
 * the rate of its parent) back to the off state after a failed enable
 * attempt.
 */
static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	/* In uni-directional mode the parent drives the rate */
	if (unidirectional)
		tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
/*
 * Enable bi-directional HiFi time sync between @sw and its parent.
 *
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. On any failure the partially applied
 * configuration is rolled back via __tb_switch_tmu_off().
 */
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	/* Bi-directional mode requires UDM cleared on both adapters */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, false);
	return ret;
}
  415. static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
  416. {
  417. u32 val;
  418. int ret;
  419. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  420. sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
  421. if (ret)
  422. return ret;
  423. val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
  424. return tb_sw_write(sw, &val, TB_CFG_SWITCH,
  425. sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
  426. }
/*
 * Set all DISABLE_TMU_OBJ bits on the upstream adapter of @sw as part
 * of enabling uni-directional time sync.
 */
static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
}
/*
 * Enable uni-directional time sync between @sw and its parent at the
 * requested rate. In uni-directional mode the rate is programmed on
 * the parent router, not on @sw itself.
 *
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. On failure after the rate has been written
 * the configuration is rolled back via __tb_switch_tmu_off().
 */
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	/* Parent drives the handshake rate in uni-directional mode */
	ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, true);
	return ret;
}
/*
 * Restore the previously active TMU mode of @sw after a failed mode
 * change attempt (the counterpart of __tb_switch_tmu_change_mode()).
 */
static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	/*
	 * The rate owner depends on the *requested* direction: if the
	 * requested mode was uni-directional the parent's rate was
	 * changed and must be restored, otherwise our own rate was.
	 */
	if (sw->tmu.unidirectional_request)
		tb_switch_tmu_rate_write(parent, sw->tmu.rate);
	else
		tb_switch_tmu_rate_write(sw, sw->tmu.rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}
/*
 * Switch @sw from its current (non-off) TMU mode to the requested
 * rate/directionality. On failure in the adapter programming steps the
 * previous mode is restored via __tb_switch_tmu_change_mode_prev().
 */
static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	/* Uni-directional mode means the parent owns the rate register */
	if (sw->tmu.unidirectional_request)
		ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	else
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
	/*
	 * NOTE(review): the two failures below return directly instead of
	 * rolling back the UDM change made above via "goto out" — looks
	 * intentional (rate was not changed yet on the first one) but
	 * worth confirming.
	 */
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi
 * or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
 * before calling this function, to select the mode Normal/HiFi and
 * directionality (uni-directional/bi-directional).
 * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
 * work. Uni-directional mode is required for CLx (Link Low-Power) to work.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	bool unidirectional = sw->tmu.unidirectional_request;
	int ret;

	if (unidirectional && !sw->tmu.has_ucap)
		return -EOPNOTSUPP;

	/*
	 * No need to enable TMU on devices that don't support CLx since on
	 * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
	 * bi-directional is enabled by default.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/* Requested mode already active? */
	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
		/*
		 * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
		 * enabled and supported together.
		 */
		if (!tb_switch_is_clx_enabled(sw, TB_CL1))
			return -EOPNOTSUPP;

		ret = tb_switch_tmu_objection_mask(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_unidirectional_enable(sw);
		if (ret)
			return ret;
	}

	/* Announce time disruption while the mode is being changed */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
			if (unidirectional)
				ret = __tb_switch_tmu_enable_unidirectional(sw);
			else
				ret = __tb_switch_tmu_enable_bidirectional(sw);
			if (ret)
				return ret;
		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
			ret = __tb_switch_tmu_change_mode(sw);
			if (ret)
				return ret;
		}
		sw->tmu.unidirectional = unidirectional;
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router' rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
		if (ret)
			return ret;
	}

	sw->tmu.rate = sw->tmu.rate_request;

	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
	return tb_switch_tmu_set_time_disruption(sw, false);
}
  598. /**
  599. * tb_switch_tmu_configure() - Configure the TMU rate and directionality
  600. * @sw: Router whose mode to change
  601. * @rate: Rate to configure Off/Normal/HiFi
  602. * @unidirectional: If uni-directional (bi-directional otherwise)
  603. *
  604. * Selects the rate of the TMU and directionality (uni-directional or
  605. * bi-directional). Must be called before tb_switch_tmu_enable().
  606. */
  607. void tb_switch_tmu_configure(struct tb_switch *sw,
  608. enum tb_switch_tmu_rate rate, bool unidirectional)
  609. {
  610. sw->tmu.unidirectional_request = unidirectional;
  611. sw->tmu.rate_request = rate;
  612. }
  613. static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
  614. {
  615. if (tb_is_switch(dev)) {
  616. struct tb_switch *sw = tb_to_switch(dev);
  617. tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
  618. tb_switch_is_clx_enabled(sw, TB_CL1));
  619. if (tb_switch_tmu_enable(sw))
  620. tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n");
  621. }
  622. return 0;
  623. }
/**
 * tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st children
 * @sw: The router to configure and enable its children's TMU
 * @rate: Rate of the TMU to configure the router's children to
 *
 * Configures and enables the TMU mode of 1st depth children of the specified
 * router to the specified rate.
 */
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
				    enum tb_switch_tmu_rate rate)
{
	/* Walks direct child devices; non-switch children are skipped */
	device_for_each_child(&sw->dev, &rate,
			      tb_switch_tmu_config_enable);
}