// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;

		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}
static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while). Once
	 * we get a response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}
static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}
static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	} else if (auth_only) {
		return -EOPNOTSUPP;
	}

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}
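/*
 * Rough order of operations when userspace triggers an NVM upgrade (a
 * sketch of the caller side, which lives outside this excerpt): the
 * cached image is validated and flushed to the router first, and only
 * then is authentication started:
 *
 *	ret = nvm_validate_and_write(sw);
 *	if (!ret)
 *		ret = nvm_authenticate(sw, false);
 */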
/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}
/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}
/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}
static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable it.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable it.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}
/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (tb_switch_is_usb4(port->sw)) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns the other end
 * of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
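/*
 * Typical usage sketch (hypothetical caller, not part of this file):
 * visit every port on the path between two endpoints by seeding the
 * walk with %NULL and iterating until the walker returns %NULL, for
 * example when counting hops:
 *
 *	struct tb_port *p = NULL;
 *	int hops = 0;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		hops++;
 */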
/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

static bool tb_port_is_width_supported(struct tb_port *port, int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);
}
/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width (%1 or %2)
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * As a side effect sets @port->bonding accordingly (and does the same
 * for lane 1 too).
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = bonding;
	port->dual_link_port->bonded = bonding;

	return 0;
}
/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled,
	 * for example by the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			goto err_lane0;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret)
			goto err_lane0;
	}

	ret = tb_port_set_lane_bonding(port, true);
	if (ret)
		goto err_lane1;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, 1);
err_lane0:
	tb_port_set_link_width(port, 1);
	return ret;
}

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
}
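/*
 * A minimal bonding sequence sketch (hypothetical caller and port
 * names, for illustration only): enable bonding on the upstream
 * adapter, wait for the link to actually reach dual-lane width, then
 * refresh the credit bookkeeping:
 *
 *	ret = tb_port_lane_bonding_enable(up);
 *	if (!ret) {
 *		ret = tb_port_wait_for_link_width(down, 2, 100);
 *		if (!ret)
 *			tb_port_update_credits(down);
 *	}
 */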
/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (%1 or %2)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 * within the given timeout, %0 if it did.
 */
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret == width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;
	return tb_port_do_update_credits(port->dual_link_port);
}
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, true);
}

static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, false);
}

/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
{
	u32 val, mask = 0;
	int ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	if (clx_mask & TB_CL1) {
		/* CL0s and CL1 are enabled and supported together */
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
	}
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
	u32 phy, mask;
	int ret;

	/* CL0s and CL1 are enabled and supported together */
	if (clx == TB_CL1)
		mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	else
		/* For now we support only CL0s and CL1. Not CL2 */
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, false);
}

static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, true);
}

/**
 * tb_port_is_clx_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx_mask: Mask of CL states to check
 *
 * Returns true if any of the given CL states is enabled for @port.
 */
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
{
	u32 val, mask = 0;
	int ret;

	if (!tb_port_clx_supported(port, clx_mask))
		return false;

	if (clx_mask & TB_CL1)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return false;

	return !!(val & mask);
}
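/*
 * Sketch of how a caller might gate CLx programming on the module
 * parameter and per-adapter support (hypothetical caller, for
 * illustration only):
 *
 *	if (clx_enabled && tb_port_clx_supported(port, TB_CL1))
 *		ret = tb_port_clx_enable(port, TB_CL1);
 */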
static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}
/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HDP);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}
/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

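/*
 * Illustrative sketch only: the kernel-doc above implies that for a
 * legacy (non-USB4) DP adapter the Hop IDs are programmed first and the
 * video/AUX paths enabled afterwards. The helper and the Hop ID values
 * below are hypothetical, chosen just for illustration; the real path
 * setup is done by the DP tunneling code.
 */
static int __maybe_unused tb_example_dp_adapter_setup(struct tb_port *port)
{
	int ret;

	/* Program video and AUX Hop IDs before enabling the paths */
	ret = tb_dp_port_set_hops(port, 9, 8, 8);
	if (ret)
		return ret;

	/* Now the video and AUX paths can be turned on */
	return tb_dp_port_enable(port, true);
}
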
/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait till the specified bits in specified offset reach specified value.
 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
 * within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

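/*
 * Illustrative sketch, not called anywhere in the driver: shows how
 * tb_switch_wait_for_bit() can be used to poll a router config space
 * register until a bit becomes set. The offset and bit used here are
 * made up for the example.
 */
static int __maybe_unused tb_example_wait_ready(struct tb_switch *sw)
{
	const u32 offset = 0x5;		/* hypothetical config space offset */
	const u32 ready_bit = BIT(26);	/* hypothetical "ready" bit */

	/* Wait up to 100 ms for the bit to become 1 */
	return tb_switch_wait_for_bit(sw, offset, ready_bit, ready_bit, 100);
}
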
/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify status change to the userspace, informing the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
		 */
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	unsigned int val;
	ssize_t ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 2)
		return -EINVAL;

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	ssize_t ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
	return ret;
}

static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
			if (!sw->key)
				ret = -ENOMEM;
		}
	}

	mutex_unlock(&sw->tb->lock);
	return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->link_width);
}

/*
 * Currently link has same amount of lanes both directions (1 or 2) but
 * expose them separately to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	u32 status;

	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
}

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int val, ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (sw->no_nvm_upgrade) {
		ret = -EOPNOTSUPP;
		goto exit_unlock;
	}

	/* If NVMem devices are not yet added */
	if (!sw->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

	if (val > 0) {
		if (val == AUTHENTICATE_ONLY) {
			if (disconnect)
				ret = -EINVAL;
			else
				ret = nvm_authenticate(sw, true);
		} else {
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE) {
				if (disconnect)
					ret = tb_lc_force_power(sw);
				else
					ret = nvm_authenticate(sw, false);
			}
		}
	}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return nvm_authenticate_show(dev, attr, buf);
}

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->safe_mode)
		ret = -ENODATA;
	else if (!sw->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};

static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
}

static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};

static void tb_switch_release(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
}

static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const char *type;

	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
		if (add_uevent_var(env, "USB4_VERSION=1.0"))
			return -ENOMEM;
	}

	if (!tb_route(sw)) {
		type = "host";
	} else {
		const struct tb_port *port;
		bool hub = false;

		/* Device is hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
				hub = true;
				break;
			}
		}

		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
		return -ENOMEM;

	return 0;
}

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;
}

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
	return 0;
}

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
};

static int tb_switch_get_generation(struct tb_switch *sw)
{
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		if (tb_switch_is_usb4(sw))
			return 4;

		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}
}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
	int max_depth;

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
}

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
{
	struct tb_switch *sw;
	int upstream_port;
	int i, ret, depth;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;

		/* Control port does not need HopID allocation */
		if (i) {
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
		}
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
	if (ret > 0)
		sw->cap_vsec_tmu = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	if (ret > 0)
		sw->cap_lp = ret;

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
}

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
}

/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use. Can be called again for the switch after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	u64 route;
	int ret;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		 */
		sw->config.cmuv = USB4_VERSION_1_0;
		sw->config.plug_events_delay = 0xa;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
		if (ret)
			return ret;

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
	}
	if (ret)
		return ret;

	return tb_plug_events_active(sw, true);
}

static int tb_switch_set_uuid(struct tb_switch *sw)
{
	bool uid = false;
	u32 uuid[4];
	int ret;

	if (sw->uuid)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		if (ret)
			return ret;
		uid = true;
	} else {
		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);
		if (ret) {
			if (ret != -EINVAL)
				return ret;
			uid = true;
		}
	}

	if (uid) {
		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!sw->uuid)
		return -ENOMEM;
	return 0;
}

static int tb_switch_add_dma_port(struct tb_switch *sw)
{
	u32 status;
	int ret;

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
	case 4:
		ret = tb_switch_set_uuid(sw);
		if (ret)
			return ret;
		break;

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
		break;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
		if (ret)
			return ret;

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);
	if (!sw->dma_port)
		return 0;

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so only thing we do here
	 * is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return 0;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
	if (ret <= 0)
		return ret;

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return error here which causes the switch adding failure.
	 * It should appear back after power cycle is complete.
	 */
	return -ESHUTDOWN;
}

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	/*
	 * Both lanes need to be in CL0. Here we assume lane 0 is already
	 * in CL0 and check just for lane 1.
	 */
	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	ret = tb_port_wait_for_link_width(down, 2, 100);
	if (ret) {
		tb_port_warn(down, "timeout enabling lane bonding\n");
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
		tb_sw_warn(sw, "timeout disabling lane bonding\n");

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}

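/*
 * Illustrative sketch, not part of the driver: the kernel-doc above
 * recommends configuring the link only after lane bonding has been
 * enabled, so a connection manager hot-plug path could look roughly like
 * the hypothetical helper below (error handling kept minimal on purpose).
 */
static void __maybe_unused tb_example_bring_up_link(struct tb_switch *sw)
{
	/* Bond the lanes first if both ends support it */
	if (tb_switch_lane_bonding_enable(sw))
		tb_sw_warn(sw, "failed to enable lane bonding\n");

	/* Then mark the link configured so it survives sleep */
	if (tb_switch_configure_link(sw))
		tb_sw_warn(sw, "failed to configure link\n");
}
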
/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}

static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		int res;

		if (!port->cap_usb4)
			continue;

		res = usb4_port_hotplug_enable(port);
		if (res)
			return res;
	}
	return 0;
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_check_quirks(sw);

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_port_hotplug_enable(sw);
	if (ret)
		return ret;

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}

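/*
 * Illustrative sketch of the switch object lifecycle implied by the
 * kernel-doc above: allocate, upload the configuration, then add the
 * device; the reference taken by tb_switch_alloc() is dropped with
 * tb_switch_put() once the switch is no longer needed. The helper is
 * hypothetical and skips the connection manager specific steps.
 */
static struct tb_switch * __maybe_unused
tb_example_enumerate_switch(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return sw;

	ret = tb_switch_configure(sw);
	if (ret)
		goto err_put;

	ret = tb_switch_add(sw);
	if (ret)
		goto err_put;

	return sw;

err_put:
	tb_switch_put(sw);
	return ERR_PTR(ret);
}
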
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after last
 * reference count drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * host router the domain is ready to go to sleep once this function
 * returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
	 * CL0s and CL1 are enabled and supported together.
	 */
	if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
		if (tb_switch_disable_clx(sw, TB_CL1))
			tb_sw_warn(sw, "failed to disable %s on upstream port\n",
				   tb_switch_clx_name(TB_CL1));
	}

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

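/*
 * Illustrative sketch of how the two calls above are paired across a
 * system sleep transition: tb_switch_suspend() walks the tree and arms
 * wakes before sleep, tb_switch_resume() re-uploads the configuration
 * and prunes devices that disappeared. The helper name is hypothetical;
 * the real callers live in tb.c.
 */
static void __maybe_unused tb_example_sleep_cycle(struct tb *tb)
{
	struct tb_switch *root = tb->root_switch;

	/* Going to system sleep: arm wakes and set the sleep bit */
	tb_switch_suspend(root, false);

	/* ...system sleeps and wakes up... */

	/* Re-initialize the tree and drop anything that was unplugged */
	if (tb_switch_resume(root))
		tb_sw_warn(root, "failed to resume root switch\n");
}
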
/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw,
			   "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}

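/*
 * Illustrative sketch of the intended query -> allocate -> de-allocate
 * flow for a DP IN adapter, as described in the kernel-doc above. The
 * helper is hypothetical; the real DP tunnel management is done by the
 * connection manager.
 */
static int __maybe_unused tb_example_claim_dp_sink(struct tb_switch *sw,
						   struct tb_port *in)
{
	/* Only try to allocate if the resource is currently available */
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	/*
	 * Pair this with tb_switch_dealloc_dp_resource() once the DP
	 * tunnel is torn down.
	 */
	return tb_switch_alloc_dp_resource(sw, in);
}
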
  2820. struct tb_sw_lookup {
  2821. struct tb *tb;
  2822. u8 link;
  2823. u8 depth;
  2824. const uuid_t *uuid;
  2825. u64 route;
  2826. };
  2827. static int tb_switch_match(struct device *dev, const void *data)
  2828. {
  2829. struct tb_switch *sw = tb_to_switch(dev);
  2830. const struct tb_sw_lookup *lookup = data;
  2831. if (!sw)
  2832. return 0;
  2833. if (sw->tb != lookup->tb)
  2834. return 0;
  2835. if (lookup->uuid)
  2836. return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
  2837. if (lookup->route) {
  2838. return sw->config.route_lo == lower_32_bits(lookup->route) &&
  2839. sw->config.route_hi == upper_32_bits(lookup->route);
  2840. }
  2841. /* Root switch is matched only by depth */
  2842. if (!lookup->depth)
  2843. return !sw->depth;
  2844. return sw->link == lookup->link && sw->depth == lookup->depth;
  2845. }
  2846. /**
  2847. * tb_switch_find_by_link_depth() - Find switch by link and depth
  2848. * @tb: Domain the switch belongs
  2849. * @link: Link number the switch is connected
  2850. * @depth: Depth of the switch in link
  2851. *
  2852. * Returned switch has reference count increased so the caller needs to
  2853. * call tb_switch_put() when done with the switch.
  2854. */
  2855. struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
  2856. {
  2857. struct tb_sw_lookup lookup;
  2858. struct device *dev;
  2859. memset(&lookup, 0, sizeof(lookup));
  2860. lookup.tb = tb;
  2861. lookup.link = link;
  2862. lookup.depth = depth;
  2863. dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
  2864. if (dev)
  2865. return tb_to_switch(dev);
  2866. return NULL;
  2867. }
  2868. /**
  2869. * tb_switch_find_by_uuid() - Find switch by UUID
  2870. * @tb: Domain the switch belongs
  2871. * @uuid: UUID to look for
  2872. *
  2873. * Returned switch has reference count increased so the caller needs to
  2874. * call tb_switch_put() when done with the switch.
  2875. */
  2876. struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
  2877. {
  2878. struct tb_sw_lookup lookup;
  2879. struct device *dev;
  2880. memset(&lookup, 0, sizeof(lookup));
  2881. lookup.tb = tb;
  2882. lookup.uuid = uuid;
  2883. dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
  2884. if (dev)
  2885. return tb_to_switch(dev);
  2886. return NULL;
  2887. }

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
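
/*
 * All tb_switch_find_*() helpers above return the router with its
 * reference count increased, so a typical caller looks roughly like this
 * (sketch only):
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		... use sw ...
 *		tb_switch_put(sw);
 *	}
 */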

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
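
/*
 * For example, a caller that needs the first PCIe downstream adapter of a
 * router could do (sketch only; TB_TYPE_PCIE_DOWN is defined in tb_regs.h):
 *
 *	struct tb_port *down;
 *
 *	down = tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
 *	if (!down)
 *		return -ENODEV;
 */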

static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}

static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	bool up_clx_support, down_clx_support;
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/*
	 * Enable CLx for host router's downstream port as part of the
	 * downstream router enabling procedure.
	 */
	if (!tb_route(sw))
		return 0;

	/* Enable CLx only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	sw->clx = clx;

	tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
	return 0;
}

/**
 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * Enable CLx state only for the first hop router. That is the most common
 * use-case; it is intended for better thermal management and so helps to
 * improve performance. CLx is enabled only if both sides of the link
 * support CLx, the link is not configured as two independent single lane
 * links, and the link is not an inter-domain link. The complete set of
 * conditions is described in CM Guide 1.0 section 8.1.
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *root_sw = sw->tb->root_switch;

	if (!clx_enabled)
		return 0;

	/*
	 * CLx is not enabled and validated on Intel USB4 platforms before
	 * Alder Lake.
	 */
	if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
		return 0;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		return __tb_switch_enable_clx(sw, clx);

	default:
		return -EOPNOTSUPP;
	}
}
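
/*
 * A connection manager typically tries this right after a first depth
 * router has been enumerated and only warns when the link cannot enter
 * CL1, along these lines (sketch only):
 *
 *	ret = tb_switch_enable_clx(sw, TB_CL1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
 *			   tb_switch_clx_name(TB_CL1));
 */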

static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/*
	 * Disable CLx for host router's downstream port as part of the
	 * downstream router disabling procedure.
	 */
	if (!tb_route(sw))
		return 0;

	/* Disable CLx only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_clx_disable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_disable(down, clx);
	if (ret)
		return ret;

	sw->clx = TB_CLX_DISABLE;

	tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
	return 0;
}

/**
 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 * @clx: The CLx state to disable
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	if (!clx_enabled)
		return 0;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		return __tb_switch_disable_clx(sw, clx);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
 * @sw: Router to mask objections for
 *
 * Mask the objections coming from the second depth routers in order to
 * stop these objections from interfering with the CLx states of the first
 * depth link.
 */
int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
	int up_port = sw->config.upstream_port_number;
	u32 offset, val[2], mask_obj, unmask_obj;
	int ret, i;

	/* Only Titan Ridge of pre-USB4 devices supports CLx states */
	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	if (!tb_route(sw))
		return 0;

	/*
	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
	 * Port A consists of lane adapters 1,2 and
	 * Port B consists of lane adapters 3,4.
	 * If the upstream port is A (lanes 1,2), we mask objections from
	 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
	 */
	if (up_port == 1) {
		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		offset = TB_LOW_PWR_C1_CL1;
	} else {
		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		offset = TB_LOW_PWR_C3_CL1;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lp + offset, ARRAY_SIZE(val));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] |= mask_obj;
		val[i] &= ~unmask_obj;
	}

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_lp + offset, ARRAY_SIZE(val));
}

/*
 * Can be used to read from or write to a specified PCIe bridge of any
 * Thunderbolt 3 device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
		   << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges shall
 * enable entry to the PCIe L1 state. Shall be called after the upstream
 * PCIe tunnel has been configured. Due to an Intel platform limitation,
 * shall be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
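
/*
 * A caller would typically invoke this right after the PCIe tunnel to a
 * first depth Titan Ridge device has been activated and treat failure as
 * non-fatal, for instance (sketch only):
 *
 *	if (tb_switch_pcie_l1_enable(sw))
 *		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
 */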

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs special flows that make the xHCI functional for any device
 * connected to the Type-C port. Call only after the PCIe tunnel has been
 * established. The function only does the connect if it has not been done
 * already, so it can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	struct tb_port *port1, *port3;
	int ret;

	if (sw->generation != 3)
		return 0;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		bool usb_port1, usb_port3, xhci_port1, xhci_port3;

		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}
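
/*
 * Since the xHCI connect depends on PCIe being up, a caller would do the
 * connect only once the PCIe tunnel is active, for example (sketch only):
 *
 *	if (!tb_tunnel_activate(tunnel))
 *		tb_switch_xhci_connect(sw);
 */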

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}