// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <[email protected]>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

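/*
 * Helpers that register a struct ida as a KUnit-managed resource, so
 * each ida is initialized here and destroyed automatically when the
 * test case finishes.
 */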
static int __ida_init(struct kunit_resource *res, void *context)
{
        struct ida *ida = context;

        ida_init(ida);
        res->data = ida;
        return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
        struct ida *ida = res->data;

        ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
        kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

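/*
 * Allocate a bare test switch with the given route, upstream port
 * number and number of ports. All allocations are KUnit-managed, so
 * nothing needs to be freed explicitly. Port 0 gets no HopID IDAs.
 */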
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
                                      u8 upstream_port, u8 max_port_number)
{
        struct tb_switch *sw;
        size_t size;
        int i;

        sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
        if (!sw)
                return NULL;

        sw->config.upstream_port_number = upstream_port;
        sw->config.depth = tb_route_length(route);
        sw->config.route_hi = upper_32_bits(route);
        sw->config.route_lo = lower_32_bits(route);
        sw->config.enabled = 0;
        sw->config.max_port_number = max_port_number;

        size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
        sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
        if (!sw->ports)
                return NULL;

        for (i = 0; i <= sw->config.max_port_number; i++) {
                sw->ports[i].sw = sw;
                sw->ports[i].port = i;
                sw->ports[i].config.port_number = i;
                if (i) {
                        kunit_ida_init(test, &sw->ports[i].in_hopids);
                        kunit_ida_init(test, &sw->ports[i].out_hopids);
                }
        }

        return sw;
}

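/*
 * Allocate a host router with two lane adapter pairs (1/2 and 3/4),
 * two DP IN adapters, an NHI, two PCIe downstream and two USB3
 * downstream adapters. The IDs identify it as an Intel host controller.
 */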
static struct tb_switch *alloc_host(struct kunit *test)
{
        struct tb_switch *sw;

        sw = alloc_switch(test, 0, 7, 13);
        if (!sw)
                return NULL;

        sw->config.vendor_id = 0x8086;
        sw->config.device_id = 0x9a1b;

        sw->ports[0].config.type = TB_TYPE_PORT;
        sw->ports[0].config.max_in_hop_id = 7;
        sw->ports[0].config.max_out_hop_id = 7;

        sw->ports[1].config.type = TB_TYPE_PORT;
        sw->ports[1].config.max_in_hop_id = 19;
        sw->ports[1].config.max_out_hop_id = 19;
        sw->ports[1].total_credits = 60;
        sw->ports[1].ctl_credits = 2;
        sw->ports[1].dual_link_port = &sw->ports[2];

        sw->ports[2].config.type = TB_TYPE_PORT;
        sw->ports[2].config.max_in_hop_id = 19;
        sw->ports[2].config.max_out_hop_id = 19;
        sw->ports[2].total_credits = 60;
        sw->ports[2].ctl_credits = 2;
        sw->ports[2].dual_link_port = &sw->ports[1];
        sw->ports[2].link_nr = 1;

        sw->ports[3].config.type = TB_TYPE_PORT;
        sw->ports[3].config.max_in_hop_id = 19;
        sw->ports[3].config.max_out_hop_id = 19;
        sw->ports[3].total_credits = 60;
        sw->ports[3].ctl_credits = 2;
        sw->ports[3].dual_link_port = &sw->ports[4];

        sw->ports[4].config.type = TB_TYPE_PORT;
        sw->ports[4].config.max_in_hop_id = 19;
        sw->ports[4].config.max_out_hop_id = 19;
        sw->ports[4].total_credits = 60;
        sw->ports[4].ctl_credits = 2;
        sw->ports[4].dual_link_port = &sw->ports[3];
        sw->ports[4].link_nr = 1;

        sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
        sw->ports[5].config.max_in_hop_id = 9;
        sw->ports[5].config.max_out_hop_id = 9;
        sw->ports[5].cap_adap = -1;

        sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
        sw->ports[6].config.max_in_hop_id = 9;
        sw->ports[6].config.max_out_hop_id = 9;
        sw->ports[6].cap_adap = -1;

        sw->ports[7].config.type = TB_TYPE_NHI;
        sw->ports[7].config.max_in_hop_id = 11;
        sw->ports[7].config.max_out_hop_id = 11;
        sw->ports[7].config.nfc_credits = 0x41800000;

        sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
        sw->ports[8].config.max_in_hop_id = 8;
        sw->ports[8].config.max_out_hop_id = 8;

        sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
        sw->ports[9].config.max_in_hop_id = 8;
        sw->ports[9].config.max_out_hop_id = 8;

        sw->ports[10].disabled = true;
        sw->ports[11].disabled = true;

        sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
        sw->ports[12].config.max_in_hop_id = 8;
        sw->ports[12].config.max_out_hop_id = 8;

        sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
        sw->ports[13].config.max_in_hop_id = 8;
        sw->ports[13].config.max_out_hop_id = 8;

        return sw;
}

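/*
 * Same as alloc_host() but the router is USB4 (generation 4) with
 * credit allocation enabled.
 */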
static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
        struct tb_switch *sw;

        sw = alloc_host(test);
        if (!sw)
                return NULL;

        sw->generation = 4;
        sw->credit_allocation = true;
        sw->max_usb3_credits = 32;
        sw->min_dp_aux_credits = 1;
        sw->min_dp_main_credits = 0;
        sw->max_pcie_credits = 64;
        sw->max_dma_credits = 14;

        return sw;
}

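/*
 * Allocate a device router with four lane adapter pairs, one PCIe
 * upstream and three downstream adapters, two DP OUT adapters, and one
 * USB3 upstream and three downstream adapters. If @parent is given the
 * new router is linked to it at @route, with the lanes optionally
 * bonded.
 */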
static struct tb_switch *alloc_dev_default(struct kunit *test,
                                           struct tb_switch *parent,
                                           u64 route, bool bonded)
{
        struct tb_port *port, *upstream_port;
        struct tb_switch *sw;

        sw = alloc_switch(test, route, 1, 19);
        if (!sw)
                return NULL;

        sw->config.vendor_id = 0x8086;
        sw->config.device_id = 0x15ef;

        sw->ports[0].config.type = TB_TYPE_PORT;
        sw->ports[0].config.max_in_hop_id = 8;
        sw->ports[0].config.max_out_hop_id = 8;

        sw->ports[1].config.type = TB_TYPE_PORT;
        sw->ports[1].config.max_in_hop_id = 19;
        sw->ports[1].config.max_out_hop_id = 19;
        sw->ports[1].total_credits = 60;
        sw->ports[1].ctl_credits = 2;
        sw->ports[1].dual_link_port = &sw->ports[2];

        sw->ports[2].config.type = TB_TYPE_PORT;
        sw->ports[2].config.max_in_hop_id = 19;
        sw->ports[2].config.max_out_hop_id = 19;
        sw->ports[2].total_credits = 60;
        sw->ports[2].ctl_credits = 2;
        sw->ports[2].dual_link_port = &sw->ports[1];
        sw->ports[2].link_nr = 1;

        sw->ports[3].config.type = TB_TYPE_PORT;
        sw->ports[3].config.max_in_hop_id = 19;
        sw->ports[3].config.max_out_hop_id = 19;
        sw->ports[3].total_credits = 60;
        sw->ports[3].ctl_credits = 2;
        sw->ports[3].dual_link_port = &sw->ports[4];

        sw->ports[4].config.type = TB_TYPE_PORT;
        sw->ports[4].config.max_in_hop_id = 19;
        sw->ports[4].config.max_out_hop_id = 19;
        sw->ports[4].total_credits = 60;
        sw->ports[4].ctl_credits = 2;
        sw->ports[4].dual_link_port = &sw->ports[3];
        sw->ports[4].link_nr = 1;

        sw->ports[5].config.type = TB_TYPE_PORT;
        sw->ports[5].config.max_in_hop_id = 19;
        sw->ports[5].config.max_out_hop_id = 19;
        sw->ports[5].total_credits = 60;
        sw->ports[5].ctl_credits = 2;
        sw->ports[5].dual_link_port = &sw->ports[6];

        sw->ports[6].config.type = TB_TYPE_PORT;
        sw->ports[6].config.max_in_hop_id = 19;
        sw->ports[6].config.max_out_hop_id = 19;
        sw->ports[6].total_credits = 60;
        sw->ports[6].ctl_credits = 2;
        sw->ports[6].dual_link_port = &sw->ports[5];
        sw->ports[6].link_nr = 1;

        sw->ports[7].config.type = TB_TYPE_PORT;
        sw->ports[7].config.max_in_hop_id = 19;
        sw->ports[7].config.max_out_hop_id = 19;
        sw->ports[7].total_credits = 60;
        sw->ports[7].ctl_credits = 2;
        sw->ports[7].dual_link_port = &sw->ports[8];

        sw->ports[8].config.type = TB_TYPE_PORT;
        sw->ports[8].config.max_in_hop_id = 19;
        sw->ports[8].config.max_out_hop_id = 19;
        sw->ports[8].total_credits = 60;
        sw->ports[8].ctl_credits = 2;
        sw->ports[8].dual_link_port = &sw->ports[7];
        sw->ports[8].link_nr = 1;

        sw->ports[9].config.type = TB_TYPE_PCIE_UP;
        sw->ports[9].config.max_in_hop_id = 8;
        sw->ports[9].config.max_out_hop_id = 8;

        sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
        sw->ports[10].config.max_in_hop_id = 8;
        sw->ports[10].config.max_out_hop_id = 8;

        sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
        sw->ports[11].config.max_in_hop_id = 8;
        sw->ports[11].config.max_out_hop_id = 8;

        sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
        sw->ports[12].config.max_in_hop_id = 8;
        sw->ports[12].config.max_out_hop_id = 8;

        sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
        sw->ports[13].config.max_in_hop_id = 9;
        sw->ports[13].config.max_out_hop_id = 9;
        sw->ports[13].cap_adap = -1;

        sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
        sw->ports[14].config.max_in_hop_id = 9;
        sw->ports[14].config.max_out_hop_id = 9;
        sw->ports[14].cap_adap = -1;

        sw->ports[15].disabled = true;

        sw->ports[16].config.type = TB_TYPE_USB3_UP;
        sw->ports[16].config.max_in_hop_id = 8;
        sw->ports[16].config.max_out_hop_id = 8;

        sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
        sw->ports[17].config.max_in_hop_id = 8;
        sw->ports[17].config.max_out_hop_id = 8;

        sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
        sw->ports[18].config.max_in_hop_id = 8;
        sw->ports[18].config.max_out_hop_id = 8;

        sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
        sw->ports[19].config.max_in_hop_id = 8;
        sw->ports[19].config.max_out_hop_id = 8;

        if (!parent)
                return sw;

        /* Link them */
        upstream_port = tb_upstream_port(sw);
        port = tb_port_at(route, parent);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;

                if (bonded) {
                        /* Bonding is used */
                        port->bonded = true;
                        port->total_credits *= 2;
                        port->dual_link_port->bonded = true;
                        port->dual_link_port->total_credits = 0;
                        upstream_port->bonded = true;
                        upstream_port->total_credits *= 2;
                        upstream_port->dual_link_port->bonded = true;
                        upstream_port->dual_link_port->total_credits = 0;
                }
        }

        return sw;
}

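/*
 * Like alloc_dev_default() but the two DP adapters are DP IN instead
 * of DP OUT.
 */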
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
                                             struct tb_switch *parent,
                                             u64 route, bool bonded)
{
        struct tb_switch *sw;

        sw = alloc_dev_default(test, parent, route, bonded);
        if (!sw)
                return NULL;

        sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
        sw->ports[13].config.max_in_hop_id = 9;
        sw->ports[13].config.max_out_hop_id = 9;

        sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
        sw->ports[14].config.max_in_hop_id = 9;
        sw->ports[14].config.max_out_hop_id = 9;

        return sw;
}

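/*
 * Like alloc_dev_default() but with the DP adapters (and the unused
 * lane, PCIe and USB3 adapters) disabled, modeling a USB4 device
 * without DP support.
 */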
static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
                                              struct tb_switch *parent,
                                              u64 route, bool bonded)
{
        struct tb_switch *sw;
        int i;

        sw = alloc_dev_default(test, parent, route, bonded);
        if (!sw)
                return NULL;
        /*
         * Device with:
         * 2x USB4 Adapters (adapters 1,2 and 3,4),
         * 1x PCIe Upstream (adapter 9),
         * 1x PCIe Downstream (adapter 10),
         * 1x USB3 Upstream (adapter 16),
         * 1x USB3 Downstream (adapter 17)
         */
        for (i = 5; i <= 8; i++)
                sw->ports[i].disabled = true;

        for (i = 11; i <= 14; i++)
                sw->ports[i].disabled = true;

        sw->ports[13].cap_adap = 0;
        sw->ports[14].cap_adap = 0;

        for (i = 18; i <= 19; i++)
                sw->ports[i].disabled = true;

        sw->generation = 4;
        sw->credit_allocation = true;
        sw->max_usb3_credits = 109;
        sw->min_dp_aux_credits = 0;
        sw->min_dp_main_credits = 0;
        sw->max_pcie_credits = 30;
        sw->max_dma_credits = 1;

        return sw;
}

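/*
 * Like alloc_dev_default() but the router is USB4 (generation 4) with
 * credit allocation enabled.
 */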
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
                                        struct tb_switch *parent,
                                        u64 route, bool bonded)
{
        struct tb_switch *sw;

        sw = alloc_dev_default(test, parent, route, bonded);
        if (!sw)
                return NULL;

        sw->generation = 4;
        sw->credit_allocation = true;
        sw->max_usb3_credits = 14;
        sw->min_dp_aux_credits = 1;
        sw->min_dp_main_credits = 18;
        sw->max_pcie_credits = 32;
        sw->max_dma_credits = 14;

        return sw;
}

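/* Walk a path whose source and destination are the same port. */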
static void tb_test_path_basic(struct kunit *test)
{
        struct tb_port *src_port, *dst_port, *p;
        struct tb_switch *host;

        host = alloc_host(test);

        src_port = &host->ports[5];
        dst_port = src_port;

        p = tb_next_port_on_path(src_port, dst_port, NULL);
        KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

        p = tb_next_port_on_path(src_port, dst_port, p);
        KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
        struct tb_port *src_port, *dst_port, *p;
        struct tb_switch *host, *dev;

        host = alloc_host(test);
        /* No connection between host and dev */
        dev = alloc_dev_default(test, NULL, 3, true);

        src_port = &host->ports[12];
        dst_port = &dev->ports[16];

        p = tb_next_port_on_path(src_port, dst_port, NULL);
        KUNIT_EXPECT_PTR_EQ(test, p, src_port);

        p = tb_next_port_on_path(src_port, dst_port, p);
        KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

        p = tb_next_port_on_path(src_port, dst_port, p);
        KUNIT_EXPECT_TRUE(test, !p);

        /* Other direction */
        p = tb_next_port_on_path(dst_port, src_port, NULL);
        KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

        p = tb_next_port_on_path(dst_port, src_port, p);
        KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

        p = tb_next_port_on_path(dst_port, src_port, p);
        KUNIT_EXPECT_TRUE(test, !p);
}

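/* Expected route, port number and adapter type at each step of a walk. */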
struct port_expectation {
        u64 route;
        u8 port;
        enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
        /*
         * Walks from Host PCIe downstream port to Device #1 PCIe
         * upstream port.
         *
         *   [Host]
         *   1 |
         *   1 |
         *   [Device]
         */
        static const struct port_expectation test_data[] = {
                { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
                { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
        };
        struct tb_port *src_port, *dst_port, *p;
        struct tb_switch *host, *dev;
        int i;

        host = alloc_host(test);
        dev = alloc_dev_default(test, host, 1, true);

        src_port = &host->ports[8];
        dst_port = &dev->ports[9];

        /* Walk both directions */
        i = 0;
        tb_for_each_port_on_path(src_port, dst_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i++;
        }

        KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

        i = ARRAY_SIZE(test_data) - 1;
        tb_for_each_port_on_path(dst_port, src_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i--;
        }

        KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
        /*
         * Walks from Host DP IN to Device #2 DP OUT.
         *
         *           [Host]
         *            1 |
         *            1 |
         *         [Device #1]
         *       3 /
         *      1 /
         *   [Device #2]
         */
        static const struct port_expectation test_data[] = {
                { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
                { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
        };
        struct tb_port *src_port, *dst_port, *p;
        struct tb_switch *host, *dev1, *dev2;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x301, true);

        src_port = &host->ports[5];
        dst_port = &dev2->ports[13];

        /* Walk both directions */
        i = 0;
        tb_for_each_port_on_path(src_port, dst_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i++;
        }

        KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

        i = ARRAY_SIZE(test_data) - 1;
        tb_for_each_port_on_path(dst_port, src_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i--;
        }

        KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
        /*
         * Walks from Host DP IN to Device #3 DP OUT.
         *
         *           [Host]
         *            1 |
         *            1 |
         *         [Device #1]
         *       3 /   | 5  \ 7
         *      1 /    |     \ 1
         * [Device #2] |    [Device #4]
         *             | 1
         *         [Device #3]
         */
        static const struct port_expectation test_data[] = {
                { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
                { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
                { .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
        };
        struct tb_port *src_port, *dst_port, *p;
        struct tb_switch *host, *dev1, *dev3;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        alloc_dev_default(test, dev1, 0x301, true);
        dev3 = alloc_dev_default(test, dev1, 0x501, true);
        alloc_dev_default(test, dev1, 0x701, true);

        src_port = &host->ports[5];
        dst_port = &dev3->ports[13];

        /* Walk both directions */
        i = 0;
        tb_for_each_port_on_path(src_port, dst_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i++;
        }

        KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

        i = ARRAY_SIZE(test_data) - 1;
        tb_for_each_port_on_path(dst_port, src_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i--;
        }

        KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
        /*
         * Walks from Device #3 DP IN to Device #9 DP OUT.
         *
         *           [Host]
         *            1 |
         *            1 |
         *         [Device #1]
         *       3 /   | 5  \ 7
         *      1 /    |     \ 1
         * [Device #2] |    [Device #5]
         *    5 |      | 1        \ 7
         *    1 |  [Device #4]     \ 1
         * [Device #3]            [Device #6]
         *                      3 /
         *                     1 /
         *                   [Device #7]
         *                 3 /   | 5
         *                1 /    |
         *        [Device #8]  1 |
         *                   [Device #9]
         */
        static const struct port_expectation test_data[] = {
                { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
                { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
                { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
                { .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
                { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
                { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
        };
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
        struct tb_port *src_port, *dst_port, *p;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x301, true);
        dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
        alloc_dev_default(test, dev1, 0x501, true);
        dev5 = alloc_dev_default(test, dev1, 0x701, true);
        dev6 = alloc_dev_default(test, dev5, 0x70701, true);
        dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
        alloc_dev_default(test, dev7, 0x303070701, true);
        dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

        src_port = &dev3->ports[13];
        dst_port = &dev9->ports[14];

        /* Walk both directions */
        i = 0;
        tb_for_each_port_on_path(src_port, dst_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i++;
        }

        KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

        i = ARRAY_SIZE(test_data) - 1;
        tb_for_each_port_on_path(dst_port, src_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i--;
        }

        KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
        struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
        struct tb_port *src_port, *dst_port, *p;
        int i;

        /*
         * Walks from Device #6 DP IN to Device #12 DP OUT.
         *
         *          [Host]
         *         1 /  \ 3
         *        1 /    \ 1
         * [Device #1]   [Device #7]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #2]   [Device #8]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #3]   [Device #9]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #4]   [Device #10]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #5]   [Device #11]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #6]   [Device #12]
         */
        static const struct port_expectation test_data[] = {
                { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
                { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
                { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
                { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
        };

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x301, true);
        dev3 = alloc_dev_default(test, dev2, 0x30301, true);
        dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
        dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
        dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
        dev7 = alloc_dev_default(test, host, 0x3, true);
        dev8 = alloc_dev_default(test, dev7, 0x303, true);
        dev9 = alloc_dev_default(test, dev8, 0x30303, true);
        dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
        dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
        dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

        src_port = &dev6->ports[13];
        dst_port = &dev12->ports[13];

        /* Walk both directions */
        i = 0;
        tb_for_each_port_on_path(src_port, dst_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i++;
        }

        KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

        i = ARRAY_SIZE(test_data) - 1;
        tb_for_each_port_on_path(dst_port, src_port, p) {
                KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
                KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
                                test_data[i].type);
                i--;
        }

        KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_not_connected(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2;
        struct tb_port *down, *up;
        struct tb_path *path;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x3, false);
        /* Not connected to anything */
        dev2 = alloc_dev_default(test, NULL, 0x303, false);

        down = &dev1->ports[10];
        up = &dev2->ports[9];

        path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
        KUNIT_ASSERT_NULL(test, path);
        path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
        KUNIT_ASSERT_NULL(test, path);
}

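/* Expected in/out ports for each hop of an allocated path. */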
struct hop_expectation {
        u64 route;
        u8 in_port;
        enum tb_port_type in_type;
        u8 out_port;
        enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
        /*
         * PCIe path from host to device using lane 0.
         *
         *   [Host]
         *  3 |: 4
         *  1 |: 2
         *   [Device]
         */
        static const struct hop_expectation test_data[] = {
                {
                        .route = 0x0,
                        .in_port = 9,
                        .in_type = TB_TYPE_PCIE_DOWN,
                        .out_port = 3,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x3,
                        .in_port = 1,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 9,
                        .out_type = TB_TYPE_PCIE_UP,
                },
        };
        struct tb_switch *host, *dev;
        struct tb_port *down, *up;
        struct tb_path *path;
        int i;

        host = alloc_host(test);
        dev = alloc_dev_default(test, host, 0x3, false);

        down = &host->ports[9];
        up = &dev->ports[9];

        path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
        KUNIT_ASSERT_NOT_NULL(test, path);
        KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
        for (i = 0; i < ARRAY_SIZE(test_data); i++) {
                const struct tb_port *in_port, *out_port;

                in_port = path->hops[i].in_port;
                out_port = path->hops[i].out_port;

                KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
                                test_data[i].in_type);
                KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
                                test_data[i].out_type);
        }
        tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
        /*
         * DP Video path from host to device using lane 1. Paths like
         * these are only used with Thunderbolt 1 devices where lane
         * bonding is not possible. USB4 specifically does not allow
         * paths like this (you either use lane 0 where lane 1 is
         * disabled or both lanes are bonded).
         *
         *   [Host]
         *  1 :| 2
         *  1 :| 2
         *   [Device]
         */
        static const struct hop_expectation test_data[] = {
                {
                        .route = 0x0,
                        .in_port = 5,
                        .in_type = TB_TYPE_DP_HDMI_IN,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x1,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 13,
                        .out_type = TB_TYPE_DP_HDMI_OUT,
                },
        };
        struct tb_switch *host, *dev;
        struct tb_port *in, *out;
        struct tb_path *path;
        int i;

        host = alloc_host(test);
        dev = alloc_dev_default(test, host, 0x1, false);

        in = &host->ports[5];
        out = &dev->ports[13];

        path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
        KUNIT_ASSERT_NOT_NULL(test, path);
        KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
        for (i = 0; i < ARRAY_SIZE(test_data); i++) {
                const struct tb_port *in_port, *out_port;

                in_port = path->hops[i].in_port;
                out_port = path->hops[i].out_port;

                KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
                                test_data[i].in_type);
                KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
                                test_data[i].out_type);
        }
        tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
        /*
         * DP Video path from host to device 3 using lane 1.
         *
         *   [Host]
         *  1 :| 2
         *  1 :| 2
         *  [Device #1]
         *  7 :| 8
         *  1 :| 2
         *  [Device #2]
         *  5 :| 6
         *  1 :| 2
         *  [Device #3]
         */
        static const struct hop_expectation test_data[] = {
                {
                        .route = 0x0,
                        .in_port = 5,
                        .in_type = TB_TYPE_DP_HDMI_IN,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x1,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 8,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x701,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 6,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x50701,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 13,
                        .out_type = TB_TYPE_DP_HDMI_OUT,
                },
        };
        struct tb_switch *host, *dev1, *dev2, *dev3;
        struct tb_port *in, *out;
        struct tb_path *path;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, false);
        dev2 = alloc_dev_default(test, dev1, 0x701, false);
        dev3 = alloc_dev_default(test, dev2, 0x50701, false);

        in = &host->ports[5];
        out = &dev3->ports[13];

        path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
        KUNIT_ASSERT_NOT_NULL(test, path);
        KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
        for (i = 0; i < ARRAY_SIZE(test_data); i++) {
                const struct tb_port *in_port, *out_port;

                in_port = path->hops[i].in_port;
                out_port = path->hops[i].out_port;

                KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
                                test_data[i].in_type);
                KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
                                test_data[i].out_type);
        }
        tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
        /*
         * DP Video path from device 3 to host using lane 1.
         *
         *   [Host]
         *  1 :| 2
         *  1 :| 2
         *  [Device #1]
         *  7 :| 8
         *  1 :| 2
         *  [Device #2]
         *  5 :| 6
         *  1 :| 2
         *  [Device #3]
         */
        static const struct hop_expectation test_data[] = {
                {
                        .route = 0x50701,
                        .in_port = 13,
                        .in_type = TB_TYPE_DP_HDMI_IN,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x701,
                        .in_port = 6,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x1,
                        .in_port = 8,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x0,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 5,
                        .out_type = TB_TYPE_DP_HDMI_IN,
                },
        };
        struct tb_switch *host, *dev1, *dev2, *dev3;
        struct tb_port *in, *out;
        struct tb_path *path;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, false);
        dev2 = alloc_dev_default(test, dev1, 0x701, false);
        dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

        in = &dev3->ports[13];
        out = &host->ports[5];

        path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
        KUNIT_ASSERT_NOT_NULL(test, path);
        KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
        for (i = 0; i < ARRAY_SIZE(test_data); i++) {
                const struct tb_port *in_port, *out_port;

                in_port = path->hops[i].in_port;
                out_port = path->hops[i].out_port;

                KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
                                test_data[i].in_type);
                KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
                                test_data[i].out_type);
        }
        tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
        /*
         * DP Video path from host to device 4 where the first and last
         * links are bonded.
         *
         *   [Host]
         *  1 |
         *  1 |
         *  [Device #1]
         *  7 :| 8
         *  1 :| 2
         *  [Device #2]
         *  5 :| 6
         *  1 :| 2
         *  [Device #3]
         *  3 |
         *  1 |
         *  [Device #4]
         */
        static const struct hop_expectation test_data[] = {
                {
                        .route = 0x0,
                        .in_port = 5,
                        .in_type = TB_TYPE_DP_HDMI_IN,
                        .out_port = 1,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x1,
                        .in_port = 1,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 8,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x701,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 6,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x50701,
                        .in_port = 2,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 3,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x3050701,
                        .in_port = 1,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 13,
                        .out_type = TB_TYPE_DP_HDMI_OUT,
                },
        };
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
        struct tb_port *in, *out;
        struct tb_path *path;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x701, false);
        dev3 = alloc_dev_default(test, dev2, 0x50701, false);
        dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

        in = &host->ports[5];
        out = &dev4->ports[13];

        path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
        KUNIT_ASSERT_NOT_NULL(test, path);
        KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
        for (i = 0; i < ARRAY_SIZE(test_data); i++) {
                const struct tb_port *in_port, *out_port;

                in_port = path->hops[i].in_port;
                out_port = path->hops[i].out_port;

                KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
                                test_data[i].in_type);
                KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
                                test_data[i].out_type);
        }
        tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
        /*
         * DP Video path from device 4 to host where the first and last
         * links are bonded.
         *
         *   [Host]
         *  1 |
         *  1 |
         *  [Device #1]
         *  7 :| 8
         *  1 :| 2
         *  [Device #2]
         *  5 :| 6
         *  1 :| 2
         *  [Device #3]
         *  3 |
         *  1 |
         *  [Device #4]
         */
        static const struct hop_expectation test_data[] = {
                {
                        .route = 0x3050701,
                        .in_port = 13,
                        .in_type = TB_TYPE_DP_HDMI_OUT,
                        .out_port = 1,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x50701,
                        .in_port = 3,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x701,
                        .in_port = 6,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 2,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x1,
                        .in_port = 8,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 1,
                        .out_type = TB_TYPE_PORT,
                },
                {
                        .route = 0x0,
                        .in_port = 1,
                        .in_type = TB_TYPE_PORT,
                        .out_port = 5,
                        .out_type = TB_TYPE_DP_HDMI_IN,
                },
        };
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
        struct tb_port *in, *out;
        struct tb_path *path;
        int i;

        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x701, false);
        dev3 = alloc_dev_default(test, dev2, 0x50701, false);
        dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

        in = &dev4->ports[13];
        out = &host->ports[5];

        path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
        KUNIT_ASSERT_NOT_NULL(test, path);
        KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
        for (i = 0; i < ARRAY_SIZE(test_data); i++) {
                const struct tb_port *in_port, *out_port;

                in_port = path->hops[i].in_port;
                out_port = path->hops[i].out_port;

                KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
                                test_data[i].in_type);
                KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
                KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
                KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
                                test_data[i].out_type);
        }
        tb_path_free(path);
}

static void tb_test_tunnel_pcie(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2;
        struct tb_tunnel *tunnel1, *tunnel2;
        struct tb_port *down, *up;

        /*
         * Create PCIe tunnel between host and two devices.
         *
         *   [Host]
         *    1 |
         *    1 |
         *   [Device #1]
         *    5 |
         *    1 |
         *   [Device #2]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x501, true);

        down = &host->ports[8];
        up = &dev1->ports[9];
        tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, tunnel1);
        KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
        KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
        KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
        KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

        down = &dev1->ports[10];
        up = &dev2->ports[9];
        tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, tunnel2);
        KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
        KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
        KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
        KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

        tb_tunnel_free(tunnel2);
        tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_dp(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        /*
         * Create DP tunnel between Host and Device
         *
         *   [Host]
         *    1 |
         *    1 |
         *   [Device]
         */
        host = alloc_host(test);
        dev = alloc_dev_default(test, host, 0x3, true);

        in = &host->ports[5];
        out = &dev->ports[13];

        tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev4;
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        /*
         * Create DP tunnel from Host DP IN to Device #4 DP OUT.
         *
         *           [Host]
         *            1 |
         *            1 |
         *         [Device #1]
         *       3 /   | 5  \ 7
         *      1 /    |     \ 1
         * [Device #2] |    [Device #4]
         *             | 1
         *         [Device #3]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        alloc_dev_default(test, dev1, 0x301, true);
        alloc_dev_default(test, dev1, 0x501, true);
        dev4 = alloc_dev_default(test, dev1, 0x701, true);

        in = &host->ports[5];
        out = &dev4->ports[14];

        tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
        tb_tunnel_free(tunnel);
}


static void tb_test_tunnel_dp_tree(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        /*
         * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
         *
         *          [Host]
         *           3 |
         *           1 |
         *         [Device #1]
         *       3 /   | 5  \ 7
         *      1 /    |     \ 1
         * [Device #2] |    [Device #4]
         *             | 1
         *         [Device #3]
         *             | 5
         *             | 1
         *         [Device #5]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x3, true);
        dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
        dev3 = alloc_dev_default(test, dev1, 0x503, true);
        alloc_dev_default(test, dev1, 0x703, true);
        dev5 = alloc_dev_default(test, dev3, 0x50503, true);

        in = &dev2->ports[13];
        out = &dev5->ports[13];

        tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
        struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        /*
         * Creates DP tunnel from Device #6 to Device #12.
         *
         *          [Host]
         *         1 /  \ 3
         *        1 /    \ 1
         * [Device #1]   [Device #7]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #2]   [Device #8]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #3]   [Device #9]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #4]   [Device #10]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #5]   [Device #11]
         *     3 |           | 3
         *     1 |           | 1
         * [Device #6]   [Device #12]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x301, true);
        dev3 = alloc_dev_default(test, dev2, 0x30301, true);
        dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
        dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
        dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
        dev7 = alloc_dev_default(test, host, 0x3, true);
        dev8 = alloc_dev_default(test, dev7, 0x303, true);
        dev9 = alloc_dev_default(test, dev8, 0x30303, true);
        dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
        dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
        dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

        in = &dev6->ports[13];
        out = &dev12->ports[13];

        tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
        /* First hop */
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
        /* Middle */
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
                            &host->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
                            &host->ports[3]);
        /* Last */
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
                            &host->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
                            &host->ports[3]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
        KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
                            &host->ports[3]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
                            &host->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2;
        struct tb_tunnel *tunnel1, *tunnel2;
        struct tb_port *down, *up;

        /*
         * Create USB3 tunnel between host and two devices.
         *
         *   [Host]
         *    1 |
         *    1 |
         *   [Device #1]
         *          \ 7
         *           \ 1
         *         [Device #2]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x701, true);

        down = &host->ports[12];
        up = &dev1->ports[16];
        tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel1);
        KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
        KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
        KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
        KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
        KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

        down = &dev1->ports[17];
        up = &dev2->ports[16];
        tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel2);
        KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
        KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
        KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
        KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
        KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

        tb_tunnel_free(tunnel2);
        tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_port_on_path(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
        struct tb_port *in, *out, *port;
        struct tb_tunnel *dp_tunnel;

        /*
         *          [Host]
         *           3 |
         *           1 |
         *         [Device #1]
         *       3 /   | 5  \ 7
         *      1 /    |     \ 1
         * [Device #2] |    [Device #4]
         *             | 1
         *         [Device #3]
         *             | 5
         *             | 1
         *         [Device #5]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x3, true);
        dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
        dev3 = alloc_dev_default(test, dev1, 0x503, true);
        dev4 = alloc_dev_default(test, dev1, 0x703, true);
        dev5 = alloc_dev_default(test, dev3, 0x50503, true);

        in = &dev2->ports[13];
        out = &dev5->ports[13];

        dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);

        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

        port = &host->ports[8];
        KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &host->ports[3];
        KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev1->ports[1];
        KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev1->ports[3];
        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev1->ports[5];
        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev1->ports[7];
        KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev3->ports[1];
        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev5->ports[1];
        KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        port = &dev4->ports[1];
        KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

        tb_tunnel_free(dp_tunnel);
}

static void tb_test_tunnel_dma(struct kunit *test)
{
        struct tb_port *nhi, *port;
        struct tb_tunnel *tunnel;
        struct tb_switch *host;

        /*
         * Create DMA tunnel from NHI to port 1 and back.
         *
         *   [Host 1]
         *    1 ^ In HopID 1 -> Out HopID 8
         *      |
         *      v In HopID 8 -> Out HopID 1
         *   ............ Domain border
         *      |
         *   [Host 2]
         */
        host = alloc_host(test);
        nhi = &host->ports[7];
        port = &host->ports[1];

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
        /* RX path */
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
        /* TX path */
        KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
        KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
        KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);

        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_rx(struct kunit *test)
{
        struct tb_port *nhi, *port;
        struct tb_tunnel *tunnel;
        struct tb_switch *host;

        /*
         * Create DMA RX tunnel from port 1 to NHI.
         *
         *   [Host 1]
         *    1 ^
         *      |
         *      | In HopID 15 -> Out HopID 2
         *   ............ Domain border
         *      |
         *   [Host 2]
         */
        host = alloc_host(test);
        nhi = &host->ports[7];
        port = &host->ports[1];

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
        /* RX path */
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);

        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_tx(struct kunit *test)
{
        struct tb_port *nhi, *port;
        struct tb_tunnel *tunnel;
        struct tb_switch *host;

        /*
         * Create DMA TX tunnel from NHI to port 1.
         *
         *   [Host 1]
         *    1 | In HopID 2 -> Out HopID 15
         *      |
         *      v
         *   ............ Domain border
         *      |
         *   [Host 2]
         */
        host = alloc_host(test);
        nhi = &host->ports[7];
        port = &host->ports[1];

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
        /* TX path */
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);

        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_chain(struct kunit *test)
{
        struct tb_switch *host, *dev1, *dev2;
        struct tb_port *nhi, *port;
        struct tb_tunnel *tunnel;

        /*
         * Create DMA tunnel from NHI to Device #2 port 3 and back.
         *
         *   [Host 1]
         *    1 ^ In HopID 1 -> Out HopID x
         *      |
         *    1 | In HopID x -> Out HopID 1
         *   [Device #1]
         *         7 \
         *          1 \
         *           [Device #2]
         *           3 | In HopID x -> Out HopID 8
         *             |
         *             v In HopID 8 -> Out HopID x
         *   ............ Domain border
         *             |
         *           [Host 2]
         */
        host = alloc_host(test);
        dev1 = alloc_dev_default(test, host, 0x1, true);
        dev2 = alloc_dev_default(test, dev1, 0x701, true);

        nhi = &host->ports[7];
        port = &dev2->ports[3];
        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
        /* RX path */
        KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
                            &dev2->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
                            &dev1->ports[7]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
                            &dev1->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
                            &host->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
        KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
        /* TX path */
        KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
        KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
                            &dev1->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
                            &dev1->ports[7]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
                            &dev2->ports[1]);
        KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
        KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);

        tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_match(struct kunit *test)
{
        struct tb_port *nhi, *port;
        struct tb_tunnel *tunnel;
        struct tb_switch *host;

        host = alloc_host(test);
        nhi = &host->ports[7];
        port = &host->ports[1];

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);

        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));

        tb_tunnel_free(tunnel);

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

        tb_tunnel_free(tunnel);

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
        KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
        KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

        tb_tunnel_free(tunnel);
}
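
/*
 * Illustrative only, not part of the suite: the cases above exercise
 * the wildcard rule of tb_tunnel_match_dma(), where -1 means "don't
 * care" for that HopID while a concrete value must match an existing
 * path end. A minimal sketch of the per-field rule (the helper name
 * is an assumption added for illustration):
 */
static bool __maybe_unused example_hopid_match(int requested, int actual)
{
        /* -1 accepts any HopID; otherwise the values must be equal */
        return requested < 0 || requested == actual;
}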

static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *up, *down;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host(test);
        dev = alloc_dev_default(test, host, 0x1, false);

        down = &host->ports[8];
        up = &dev->ports[9];
        tunnel = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

        tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *up, *down;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host(test);
        dev = alloc_dev_default(test, host, 0x1, true);

        down = &host->ports[8];
        up = &dev->ports[9];
        tunnel = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

        tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_pcie(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *up, *down;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host_usb4(test);
        dev = alloc_dev_usb4(test, host, 0x1, true);

        down = &host->ports[8];
        up = &dev->ports[9];
        tunnel = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

        tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_without_dp(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *up, *down;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host_usb4(test);
        dev = alloc_dev_without_dp(test, host, 0x1, true);

        /*
         * The device has no DP, therefore baMinDPmain = baMinDPaux = 0.
         *
         * Create a PCIe path with fewer buffers than baMaxPCIe.
         *
         * For a device with the buffer configuration:
         *   baMaxUSB3 = 109
         *   baMinDPaux = 0
         *   baMinDPmain = 0
         *   baMaxPCIe = 30
         *   baMaxHI = 1
         *
         * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
         * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
         *              = Max(6, Min(30, 9)) = 9
         */
        down = &host->ports[8];
        up = &dev->ports[9];
        tunnel = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

        /* PCIe downstream path */
        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);

        /* PCIe upstream path */
        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

        tb_tunnel_free(tunnel);
}
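
/*
 * Illustrative only: the credit arithmetic from the comment above,
 * written out. With total = 120, baMaxPCIe = 30 and baMaxUSB3 = 109
 * this yields max(6, min(30, 118 - 109)) = 9, matching the expected
 * initial_credits of 9 on the device-side hop. The helper name is an
 * assumption added for illustration, not the driver's allocator.
 */
static unsigned int __maybe_unused
example_pcie_credits(unsigned int total, unsigned int ba_max_pcie,
                     unsigned int ba_max_usb3)
{
        /* The control path (CP) takes 2 buffers; this device has no DP */
        unsigned int remaining = total - (2 + 0);

        return max(6U, min(ba_max_pcie, remaining - ba_max_usb3));
}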

static void tb_test_credit_alloc_dp(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host_usb4(test);
        dev = alloc_dev_usb4(test, host, 0x1, true);

        in = &host->ports[5];
        out = &dev->ports[14];

        tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);

        /* Video (main) path */
        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

        /* AUX TX */
        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        /* AUX RX */
        path = tunnel->paths[2];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_usb3(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *up, *down;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host_usb4(test);
        dev = alloc_dev_usb4(test, host, 0x1, true);

        down = &host->ports[12];
        up = &dev->ports[16];
        tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

        tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dma(struct kunit *test)
{
        struct tb_switch *host, *dev;
        struct tb_port *nhi, *port;
        struct tb_tunnel *tunnel;
        struct tb_path *path;

        host = alloc_host_usb4(test);
        dev = alloc_dev_usb4(test, host, 0x1, true);

        nhi = &host->ports[7];
        port = &dev->ports[3];

        tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel);
        KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

        /* DMA RX */
        path = tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        /* DMA TX */
        path = tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
{
        struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
        struct tb_switch *host, *dev;
        struct tb_port *nhi, *port;
        struct tb_path *path;

        host = alloc_host_usb4(test);
        dev = alloc_dev_usb4(test, host, 0x1, true);

        nhi = &host->ports[7];
        port = &dev->ports[3];

        /*
         * Create three DMA tunnels through the same ports. With the
         * default buffers we should be able to create two, and the
         * third one should fail.
         *
         * For the default host we have the following buffers for DMA:
         *
         *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
         *
         * For the device we have the following:
         *
         *   120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
         *
         * spare = 14 + 1 = 15
         *
         * So on the host the first tunnel gets 14, the second gets the
         * remaining 1, and then we run out of buffers.
         */
        tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
        KUNIT_ASSERT_NOT_NULL(test, tunnel1);
        KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);

        path = tunnel1->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        path = tunnel1->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
        KUNIT_ASSERT_NOT_NULL(test, tunnel2);
        KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);

        path = tunnel2->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        path = tunnel2->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
        KUNIT_ASSERT_NULL(test, tunnel3);

        /*
         * Release the first DMA tunnel. That should make 14 buffers
         * available for the next tunnel.
         */
        tb_tunnel_free(tunnel1);

        tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
        KUNIT_ASSERT_NOT_NULL(test, tunnel3);

        path = tunnel3->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        path = tunnel3->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        tb_tunnel_free(tunnel3);
        tb_tunnel_free(tunnel2);
}
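
/*
 * Illustrative only: a sketch of the exhaustion pattern exercised
 * above. DMA tunnels claim buffers from a shared per-port pool and
 * return them when freed, which is why the first tunnel gets 14, the
 * second only the remaining 1, and the third fails until a tunnel is
 * released. The struct and helper names are assumptions added for
 * illustration, not the driver's real allocator.
 */
struct example_buf_pool {
        unsigned int avail;
};

static bool __maybe_unused example_buf_claim(struct example_buf_pool *pool,
                                             unsigned int want,
                                             unsigned int *got)
{
        if (!pool->avail)
                return false;           /* third tunnel: allocation fails */
        *got = min(want, pool->avail);  /* 14 first, then the remaining 1 */
        pool->avail -= *got;
        return true;
}

static void __maybe_unused example_buf_release(struct example_buf_pool *pool,
                                               unsigned int got)
{
        pool->avail += got;             /* the tb_tunnel_free() analogue */
}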

static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
                        struct tb_switch *host, struct tb_switch *dev)
{
        struct tb_port *up, *down;
        struct tb_tunnel *pcie_tunnel;
        struct tb_path *path;

        down = &host->ports[8];
        up = &dev->ports[9];
        pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
        KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
        KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);

        path = pcie_tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

        path = pcie_tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

        return pcie_tunnel;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
                        struct tb_switch *host, struct tb_switch *dev)
{
        struct tb_port *in, *out;
        struct tb_tunnel *dp_tunnel1;
        struct tb_path *path;

        in = &host->ports[5];
        out = &dev->ports[13];
        dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
        KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);

        path = dp_tunnel1->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

        path = dp_tunnel1->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        path = dp_tunnel1->paths[2];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        return dp_tunnel1;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
                        struct tb_switch *host, struct tb_switch *dev)
{
        struct tb_port *in, *out;
        struct tb_tunnel *dp_tunnel2;
        struct tb_path *path;

        in = &host->ports[6];
        out = &dev->ports[14];
        dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
        KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);

        path = dp_tunnel2->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

        path = dp_tunnel2->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        path = dp_tunnel2->paths[2];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        return dp_tunnel2;
}

static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
                        struct tb_switch *host, struct tb_switch *dev)
{
        struct tb_port *up, *down;
        struct tb_tunnel *usb3_tunnel;
        struct tb_path *path;

        down = &host->ports[12];
        up = &dev->ports[16];
        usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
        KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
        KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);

        path = usb3_tunnel->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        path = usb3_tunnel->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

        return usb3_tunnel;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
                        struct tb_switch *host, struct tb_switch *dev)
{
        struct tb_port *nhi, *port;
        struct tb_tunnel *dma_tunnel1;
        struct tb_path *path;

        nhi = &host->ports[7];
        port = &dev->ports[3];
        dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
        KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
        KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);

        path = dma_tunnel1->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        path = dma_tunnel1->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

        return dma_tunnel1;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
                        struct tb_switch *host, struct tb_switch *dev)
{
        struct tb_port *nhi, *port;
        struct tb_tunnel *dma_tunnel2;
        struct tb_path *path;

        nhi = &host->ports[7];
        port = &dev->ports[3];
        dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
        KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
        KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);

        path = dma_tunnel2->paths[0];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        path = dma_tunnel2->paths[1];
        KUNIT_ASSERT_EQ(test, path->path_length, 2);
        KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
        KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

        return dma_tunnel2;
}

static void tb_test_credit_alloc_all(struct kunit *test)
{
        struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
        struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
        struct tb_switch *host, *dev;

        /*
         * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
         * device. Expectation is that all these can be established with
         * the default credit allocation found in Intel hardware.
         */
        host = alloc_host_usb4(test);
        dev = alloc_dev_usb4(test, host, 0x1, true);

        pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
        dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
        dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
        usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
        dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
        dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);

        tb_tunnel_free(dma_tunnel2);
        tb_tunnel_free(dma_tunnel1);
        tb_tunnel_free(usb3_tunnel);
        tb_tunnel_free(dp_tunnel2);
        tb_tunnel_free(dp_tunnel1);
        tb_tunnel_free(pcie_tunnel);
}

static const u32 root_directory[] = {
        0x55584401,     /* "UXD" v1 */
        0x00000018,     /* Root directory length */
        0x76656e64,     /* "vend" */
        0x6f726964,     /* "orid" */
        0x76000001,     /* "v" R 1 */
        0x00000a27,     /* Immediate value, ! Vendor ID */
        0x76656e64,     /* "vend" */
        0x6f726964,     /* "orid" */
        0x74000003,     /* "t" R 3 */
        0x0000001a,     /* Text leaf offset, (“Apple Inc.”) */
        0x64657669,     /* "devi" */
        0x63656964,     /* "ceid" */
        0x76000001,     /* "v" R 1 */
        0x0000000a,     /* Immediate value, ! Device ID */
        0x64657669,     /* "devi" */
        0x63656964,     /* "ceid" */
        0x74000003,     /* "t" R 3 */
        0x0000001d,     /* Text leaf offset, (“Macintosh”) */
        0x64657669,     /* "devi" */
        0x63657276,     /* "cerv" */
        0x76000001,     /* "v" R 1 */
        0x80000100,     /* Immediate value, Device Revision */
        0x6e657477,     /* "netw" */
        0x6f726b00,     /* "ork" */
        0x44000014,     /* "D" R 20 */
        0x00000021,     /* Directory data offset, (Network Directory) */
        0x4170706c,     /* "Appl" */
        0x6520496e,     /* "e In" */
        0x632e0000,     /* "c." ! */
        0x4d616369,     /* "Maci" */
        0x6e746f73,     /* "ntos" */
        0x68000000,     /* "h" */
        0x00000000,     /* padding */
        0xca8961c6,     /* Directory UUID, Network Directory */
        0x9541ce1c,     /* Directory UUID, Network Directory */
        0x5949b8bd,     /* Directory UUID, Network Directory */
        0x4f5a5f2e,     /* Directory UUID, Network Directory */
        0x70727463,     /* "prtc" */
        0x69640000,     /* "id" */
        0x76000001,     /* "v" R 1 */
        0x00000001,     /* Immediate value, Network Protocol ID */
        0x70727463,     /* "prtc" */
        0x76657273,     /* "vers" */
        0x76000001,     /* "v" R 1 */
        0x00000001,     /* Immediate value, Network Protocol Version */
        0x70727463,     /* "prtc" */
        0x72657673,     /* "revs" */
        0x76000001,     /* "v" R 1 */
        0x00000001,     /* Immediate value, Network Protocol Revision */
        0x70727463,     /* "prtc" */
        0x73746e73,     /* "stns" */
        0x76000001,     /* "v" R 1 */
        0x00000000,     /* Immediate value, Network Protocol Settings */
};

static const uuid_t network_dir_uuid =
        UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
                  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
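
/*
 * Illustrative only: property keys in the block above are stored as
 * u32 words of big-endian-packed ASCII, e.g. "vendorid" becomes
 * 0x76656e64 ("vend") followed by 0x6f726964 ("orid"). The helper
 * below is a sketch of that packing for one word, added for
 * illustration; it is not an API of the property code.
 */
static u32 __maybe_unused example_pack_key_word(const char *four_chars)
{
        return (u32)four_chars[0] << 24 | (u32)four_chars[1] << 16 |
               (u32)four_chars[2] << 8 | (u32)four_chars[3];
}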

static void tb_test_property_parse(struct kunit *test)
{
        struct tb_property_dir *dir, *network_dir;
        struct tb_property *p;

        dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
        KUNIT_ASSERT_NOT_NULL(test, dir);

        p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
        KUNIT_ASSERT_NULL(test, p);

        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");

        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);

        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");

        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);

        p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
        KUNIT_ASSERT_NULL(test, p);

        p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
        KUNIT_ASSERT_NOT_NULL(test, p);

        network_dir = p->value.dir;
        KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));

        p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

        p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

        p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

        p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
        KUNIT_ASSERT_NOT_NULL(test, p);
        KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);

        p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
        KUNIT_EXPECT_NULL(test, p);
        p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
        KUNIT_EXPECT_NULL(test, p);

        tb_property_free_dir(dir);
}

static void tb_test_property_format(struct kunit *test)
{
        struct tb_property_dir *dir;
        ssize_t block_len;
        u32 *block;
        int ret, i;

        dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
        KUNIT_ASSERT_NOT_NULL(test, dir);

        ret = tb_property_format_dir(dir, NULL, 0);
        KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

        block_len = ret;
        block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, block);

        ret = tb_property_format_dir(dir, block, block_len);
        KUNIT_EXPECT_EQ(test, ret, 0);

        for (i = 0; i < ARRAY_SIZE(root_directory); i++)
                KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

        tb_property_free_dir(dir);
}
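
/*
 * Illustrative only: the sizing convention the test above relies on.
 * Calling tb_property_format_dir() with a NULL block returns the
 * number of u32 words required, after which the caller allocates the
 * block and formats for real. A minimal usage sketch (error handling
 * elided; the helper name is an assumption added for illustration):
 */
static u32 * __maybe_unused
example_format_block(struct kunit *test, struct tb_property_dir *dir)
{
        int len = tb_property_format_dir(dir, NULL, 0);
        u32 *block;

        if (len <= 0)
                return NULL;
        block = kunit_kzalloc(test, len * sizeof(u32), GFP_KERNEL);
        if (block && tb_property_format_dir(dir, block, len))
                return NULL;    /* a non-zero return here means failure */
        return block;
}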

static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
                         struct tb_property_dir *d2)
{
        struct tb_property *p1, *p2, *tmp;
        int n1, n2, i;

        if (d1->uuid) {
                KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
                KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
        } else {
                KUNIT_ASSERT_NULL(test, d2->uuid);
        }

        n1 = 0;
        tb_property_for_each(d1, tmp)
                n1++;
        KUNIT_ASSERT_NE(test, n1, 0);

        n2 = 0;
        tb_property_for_each(d2, tmp)
                n2++;
        KUNIT_ASSERT_NE(test, n2, 0);

        KUNIT_ASSERT_EQ(test, n1, n2);

        p1 = NULL;
        p2 = NULL;
        for (i = 0; i < n1; i++) {
                p1 = tb_property_get_next(d1, p1);
                KUNIT_ASSERT_NOT_NULL(test, p1);
                p2 = tb_property_get_next(d2, p2);
                KUNIT_ASSERT_NOT_NULL(test, p2);

                KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
                KUNIT_ASSERT_EQ(test, p1->type, p2->type);
                KUNIT_ASSERT_EQ(test, p1->length, p2->length);

                switch (p1->type) {
                case TB_PROPERTY_TYPE_DIRECTORY:
                        KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
                        KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
                        compare_dirs(test, p1->value.dir, p2->value.dir);
                        break;

                case TB_PROPERTY_TYPE_DATA:
                        KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
                        KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
                        KUNIT_ASSERT_TRUE(test,
                                !memcmp(p1->value.data, p2->value.data,
                                        p1->length * 4));
                        break;

                case TB_PROPERTY_TYPE_TEXT:
                        KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
                        KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
                        KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
                        break;

                case TB_PROPERTY_TYPE_VALUE:
                        KUNIT_ASSERT_EQ(test, p1->value.immediate,
                                        p2->value.immediate);
                        break;
                default:
                        KUNIT_FAIL(test, "unexpected property type");
                        break;
                }
        }
}

static void tb_test_property_copy(struct kunit *test)
{
        struct tb_property_dir *src, *dst;
        u32 *block;
        int ret, i;

        src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
        KUNIT_ASSERT_NOT_NULL(test, src);

        dst = tb_property_copy_dir(src);
        KUNIT_ASSERT_NOT_NULL(test, dst);

        /* Compare the structures */
        compare_dirs(test, src, dst);

        /* Compare the resulting property block */
        ret = tb_property_format_dir(dst, NULL, 0);
        KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

        block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, block);

        ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
        KUNIT_EXPECT_EQ(test, ret, 0);

        for (i = 0; i < ARRAY_SIZE(root_directory); i++)
                KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

        tb_property_free_dir(dst);
        tb_property_free_dir(src);
}

static struct kunit_case tb_test_cases[] = {
        KUNIT_CASE(tb_test_path_basic),
        KUNIT_CASE(tb_test_path_not_connected_walk),
        KUNIT_CASE(tb_test_path_single_hop_walk),
        KUNIT_CASE(tb_test_path_daisy_chain_walk),
        KUNIT_CASE(tb_test_path_simple_tree_walk),
        KUNIT_CASE(tb_test_path_complex_tree_walk),
        KUNIT_CASE(tb_test_path_max_length_walk),
        KUNIT_CASE(tb_test_path_not_connected),
        KUNIT_CASE(tb_test_path_not_bonded_lane0),
        KUNIT_CASE(tb_test_path_not_bonded_lane1),
        KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
        KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
        KUNIT_CASE(tb_test_path_mixed_chain),
        KUNIT_CASE(tb_test_path_mixed_chain_reverse),
        KUNIT_CASE(tb_test_tunnel_pcie),
        KUNIT_CASE(tb_test_tunnel_dp),
        KUNIT_CASE(tb_test_tunnel_dp_chain),
        KUNIT_CASE(tb_test_tunnel_dp_tree),
        KUNIT_CASE(tb_test_tunnel_dp_max_length),
        KUNIT_CASE(tb_test_tunnel_port_on_path),
        KUNIT_CASE(tb_test_tunnel_usb3),
        KUNIT_CASE(tb_test_tunnel_dma),
        KUNIT_CASE(tb_test_tunnel_dma_rx),
        KUNIT_CASE(tb_test_tunnel_dma_tx),
        KUNIT_CASE(tb_test_tunnel_dma_chain),
        KUNIT_CASE(tb_test_tunnel_dma_match),
        KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
        KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
        KUNIT_CASE(tb_test_credit_alloc_pcie),
        KUNIT_CASE(tb_test_credit_alloc_without_dp),
        KUNIT_CASE(tb_test_credit_alloc_dp),
        KUNIT_CASE(tb_test_credit_alloc_usb3),
        KUNIT_CASE(tb_test_credit_alloc_dma),
        KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
        KUNIT_CASE(tb_test_credit_alloc_all),
        KUNIT_CASE(tb_test_property_parse),
        KUNIT_CASE(tb_test_property_format),
        KUNIT_CASE(tb_test_property_copy),
        { }
};

static struct kunit_suite tb_test_suite = {
        .name = "thunderbolt",
        .test_cases = tb_test_cases,
};

kunit_test_suite(tb_test_suite);