// SPDX-License-Identifier: GPL-2.0
/*
 * Kunit test for clk rate management
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>

/* Needed for clk_hw_get_clk() */
#include "clk.h"

#include <kunit/test.h>

#define DUMMY_CLOCK_INIT_RATE (42 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_1 (142 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_2 (242 * 1000 * 1000)

struct clk_dummy_context {
        struct clk_hw hw;
        unsigned long rate;
};

static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
                                           unsigned long parent_rate)
{
        struct clk_dummy_context *ctx =
                container_of(hw, struct clk_dummy_context, hw);

        return ctx->rate;
}

static int clk_dummy_determine_rate(struct clk_hw *hw,
                                    struct clk_rate_request *req)
{
        /* Just return the same rate without modifying it */
        return 0;
}

static int clk_dummy_maximize_rate(struct clk_hw *hw,
                                   struct clk_rate_request *req)
{
        /*
         * If there's a maximum set, always run the clock at the maximum
         * allowed.
         */
        if (req->max_rate < ULONG_MAX)
                req->rate = req->max_rate;

        return 0;
}

static int clk_dummy_minimize_rate(struct clk_hw *hw,
                                   struct clk_rate_request *req)
{
        /*
         * If there's a minimum set, always run the clock at the minimum
         * allowed.
         */
        if (req->min_rate > 0)
                req->rate = req->min_rate;

        return 0;
}

static int clk_dummy_set_rate(struct clk_hw *hw,
                              unsigned long rate,
                              unsigned long parent_rate)
{
        struct clk_dummy_context *ctx =
                container_of(hw, struct clk_dummy_context, hw);

        ctx->rate = rate;
        return 0;
}

static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
{
        if (index >= clk_hw_get_num_parents(hw))
                return -EINVAL;

        return 0;
}

static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
        return 0;
}

static const struct clk_ops clk_dummy_rate_ops = {
        .recalc_rate = clk_dummy_recalc_rate,
        .determine_rate = clk_dummy_determine_rate,
        .set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_maximize_rate_ops = {
        .recalc_rate = clk_dummy_recalc_rate,
        .determine_rate = clk_dummy_maximize_rate,
        .set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_minimize_rate_ops = {
        .recalc_rate = clk_dummy_recalc_rate,
        .determine_rate = clk_dummy_minimize_rate,
        .set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_single_parent_ops = {
        .set_parent = clk_dummy_single_set_parent,
        .get_parent = clk_dummy_single_get_parent,
};
struct clk_multiple_parent_ctx {
        struct clk_dummy_context parents_ctx[2];
        struct clk_hw hw;
        u8 current_parent;
};

static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_multiple_parent_ctx *ctx =
                container_of(hw, struct clk_multiple_parent_ctx, hw);

        if (index >= clk_hw_get_num_parents(hw))
                return -EINVAL;

        ctx->current_parent = index;

        return 0;
}

static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
{
        struct clk_multiple_parent_ctx *ctx =
                container_of(hw, struct clk_multiple_parent_ctx, hw);

        return ctx->current_parent;
}

static const struct clk_ops clk_multiple_parents_mux_ops = {
        .get_parent = clk_multiple_parents_mux_get_parent,
        .set_parent = clk_multiple_parents_mux_set_parent,
        .determine_rate = __clk_mux_determine_rate_closest,
};

static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
        struct clk_dummy_context *ctx;
        struct clk_init_data init = { };
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->rate = DUMMY_CLOCK_INIT_RATE;
        test->priv = ctx;

        init.name = "test_dummy_rate";
        init.ops = ops;
        ctx->hw.init = &init;

        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        return 0;
}

static int clk_test_init(struct kunit *test)
{
        return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}

static int clk_maximize_test_init(struct kunit *test)
{
        return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}

static int clk_minimize_test_init(struct kunit *test)
{
        return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}

static void clk_test_exit(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;

        clk_hw_unregister(&ctx->hw);
}
/*
 * Test that the actual rate matches what is returned by clk_get_rate()
 */
static void clk_test_get_rate(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, ctx->rate);

        clk_put(clk);
}

/*
 * Test that, after a call to clk_set_rate(), the rate returned by
 * clk_get_rate() matches.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_test_set_get_rate(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
                        0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

        clk_put(clk);
}

/*
 * Test that, after several calls to clk_set_rate(), the rate returned
 * by clk_get_rate() matches the last one.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_test_set_set_get_rate(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
                        0);

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
                        0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(clk);
}

/*
 * Test that clk_round_rate() and clk_set_rate() are consistent and will
 * return the same frequency.
 */
static void clk_test_round_set_get_rate(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rounded_rate, set_rate;

        rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
        KUNIT_ASSERT_GT(test, rounded_rate, 0);
        KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
                        0);

        set_rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, set_rate, 0);
        KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);

        clk_put(clk);
}

static struct kunit_case clk_test_cases[] = {
        KUNIT_CASE(clk_test_get_rate),
        KUNIT_CASE(clk_test_set_get_rate),
        KUNIT_CASE(clk_test_set_set_get_rate),
        KUNIT_CASE(clk_test_round_set_get_rate),
        {}
};
/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_test_suite = {
        .name = "clk-test",
        .init = clk_test_init,
        .exit = clk_test_exit,
        .test_cases = clk_test_cases,
};

static int clk_uncached_test_init(struct kunit *test)
{
        struct clk_dummy_context *ctx;
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        test->priv = ctx;

        ctx->rate = DUMMY_CLOCK_INIT_RATE;
        ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
                                             &clk_dummy_rate_ops,
                                             CLK_GET_RATE_NOCACHE);

        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        return 0;
}

/*
 * Test that for an uncached clock, the clock framework doesn't cache
 * the rate and clk_get_rate() will return the underlying clock rate
 * even if it changed.
 */
static void clk_test_uncached_get_rate(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);

        /* We change the rate behind the clock framework's back */
        ctx->rate = DUMMY_CLOCK_RATE_1;
        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

        clk_put(clk);
}

/*
 * Test that for an uncached clock, clk_set_rate_range() will work
 * properly if the rate hasn't changed.
 */
static void clk_test_uncached_set_range(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate_range(clk,
                                           DUMMY_CLOCK_RATE_1,
                                           DUMMY_CLOCK_RATE_2),
                        0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(clk);
}

/*
 * Test that for an uncached clock, clk_set_rate_range() will work
 * properly if the rate has changed in hardware.
 *
 * In this case, it means that if the rate wasn't initially in the range
 * we're trying to set, but got changed at some point into the range
 * without the kernel knowing about it, its rate shouldn't be affected.
 */
static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        /* We change the rate behind the clock framework's back */
        ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
        KUNIT_ASSERT_EQ(test,
                        clk_set_rate_range(clk,
                                           DUMMY_CLOCK_RATE_1,
                                           DUMMY_CLOCK_RATE_2),
                        0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

        clk_put(clk);
}

static struct kunit_case clk_uncached_test_cases[] = {
        KUNIT_CASE(clk_test_uncached_get_rate),
        KUNIT_CASE(clk_test_uncached_set_range),
        KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
        {}
};

/*
 * Test suite for a basic, uncached, rate clock, without any parent.
 *
 * These tests exercise the rate API with simple scenarios
 */
static struct kunit_suite clk_uncached_test_suite = {
        .name = "clk-uncached-test",
        .init = clk_uncached_test_init,
        .exit = clk_test_exit,
        .test_cases = clk_uncached_test_cases,
};
static int
clk_multiple_parents_mux_test_init(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx;
        const char *parents[2] = { "parent-0", "parent-1"};
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        test->priv = ctx;

        ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
                                                            &clk_dummy_rate_ops,
                                                            0);
        ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
        ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
        if (ret)
                return ret;

        ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
                                                            &clk_dummy_rate_ops,
                                                            0);
        ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
        ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
        if (ret)
                return ret;

        ctx->current_parent = 0;
        ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
                                           &clk_multiple_parents_mux_ops,
                                           CLK_SET_RATE_PARENT);
        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        return 0;
}

static void
clk_multiple_parents_mux_test_exit(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;

        clk_hw_unregister(&ctx->hw);
        clk_hw_unregister(&ctx->parents_ctx[0].hw);
        clk_hw_unregister(&ctx->parents_ctx[1].hw);
}

/*
 * Test that for a clock with multiple parents, clk_get_parent()
 * actually returns the current one.
 */
static void
clk_test_multiple_parents_mux_get_parent(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);

        KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));

        clk_put(parent);
        clk_put(clk);
}

/*
 * Test that for a clock with multiple parents, clk_has_parent()
 * actually reports all of them as parents.
 */
static void
clk_test_multiple_parents_mux_has_parent(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;

        parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
        KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
        clk_put(parent);

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
        clk_put(parent);

        clk_put(clk);
}

/*
 * Test that for a clock with multiple parents, if we set a range on
 * that clock and the parent is changed, its rate after the reparenting
 * is still within the range we asked for.
 *
 * FIXME: clk_set_parent() only does the reparenting but doesn't
 * reevaluate whether the new clock rate is within its boundaries or
 * not.
 */
static void
clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent1, *parent2;
        unsigned long rate;
        int ret;

        kunit_skip(test, "This needs to be fixed in the core.");

        parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
        KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));

        parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);

        ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(clk,
                                 DUMMY_CLOCK_RATE_1 - 1000,
                                 DUMMY_CLOCK_RATE_1 + 1000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_parent(clk, parent2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

        clk_put(parent2);
        clk_put(parent1);
        clk_put(clk);
}

static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
        KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
        KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
        KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
        {}
};

/*
 * Test suite for a basic mux clock with two parents, with
 * CLK_SET_RATE_PARENT on the child.
 *
 * These tests exercise the consumer API and check that the state of the
 * child and parents are sane and consistent.
 */
static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
        .name = "clk-multiple-parents-mux-test",
        .init = clk_multiple_parents_mux_test_init,
        .exit = clk_multiple_parents_mux_test_exit,
        .test_cases = clk_multiple_parents_mux_test_cases,
};
static int
clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx;
        const char *parents[2] = { "missing-parent", "proper-parent"};
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        test->priv = ctx;

        ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
                                                            &clk_dummy_rate_ops,
                                                            0);
        ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
        ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
        if (ret)
                return ret;

        ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
                                           &clk_multiple_parents_mux_ops,
                                           CLK_SET_RATE_PARENT);
        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        return 0;
}

static void
clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;

        clk_hw_unregister(&ctx->hw);
        clk_hw_unregister(&ctx->parents_ctx[1].hw);
}

/*
 * Test that, for a mux whose current parent hasn't been registered yet and is
 * thus orphan, clk_get_parent() will return NULL.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);

        KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);

        clk_put(clk);
}
/*
 * Test that, for a mux whose current parent hasn't been registered yet,
 * calling clk_set_parent() to a valid parent will properly update the
 * mux parent and its orphan status.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent, *new_parent;
        int ret;

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        new_parent = clk_get_parent(clk);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
        KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));

        clk_put(parent);
        clk_put(clk);
}
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_drop_range() on the mux won't affect the parent
 * rate.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long parent_rate, new_parent_rate;
        int ret;

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        parent_rate = clk_get_rate(parent);
        KUNIT_ASSERT_GT(test, parent_rate, 0);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_drop_range(clk);
        KUNIT_ASSERT_EQ(test, ret, 0);

        new_parent_rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, new_parent_rate, 0);
        KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);

        clk_put(parent);
        clk_put(clk);
}

/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, the rate of the mux and its new parent are consistent.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long parent_rate, rate;
        int ret;

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        parent_rate = clk_get_rate(parent);
        KUNIT_ASSERT_GT(test, parent_rate, 0);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, parent_rate, rate);

        clk_put(parent);
        clk_put(clk);
}

/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_put() on the mux won't affect the parent rate.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk *clk, *parent;
        unsigned long parent_rate, new_parent_rate;
        int ret;

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        clk = clk_hw_get_clk(&ctx->hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);

        parent_rate = clk_get_rate(parent);
        KUNIT_ASSERT_GT(test, parent_rate, 0);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        clk_put(clk);

        new_parent_rate = clk_get_rate(parent);
        KUNIT_ASSERT_GT(test, new_parent_rate, 0);
        KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);

        clk_put(parent);
}
/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_set_rate_range() will affect the parent state if
 * its rate is out of range.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long rate;
        int ret;

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(parent);
        clk_put(clk);
}

/*
 * Test that, for a mux that started orphan but got switched to a valid
 * parent, calling clk_set_rate_range() won't affect the parent state if
 * its rate is within range.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long parent_rate, new_parent_rate;
        int ret;

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        parent_rate = clk_get_rate(parent);
        KUNIT_ASSERT_GT(test, parent_rate, 0);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(clk,
                                 DUMMY_CLOCK_INIT_RATE - 1000,
                                 DUMMY_CLOCK_INIT_RATE + 1000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        new_parent_rate = clk_get_rate(parent);
        KUNIT_ASSERT_GT(test, new_parent_rate, 0);
        KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);

        clk_put(parent);
        clk_put(clk);
}

/*
 * Test that, for a mux whose current parent hasn't been registered yet,
 * calling clk_set_rate_range() will succeed, and will be taken into
 * account when rounding a rate.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;
        int ret;

        ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(clk);
}

/*
 * Test that, for a mux that started orphan, was assigned a rate and
 * then got switched to a valid parent, its rate is eventually within
 * range.
 *
 * FIXME: Even though we update the rate as part of clk_set_parent(), we
 * don't evaluate whether that new rate is within range and needs to be
 * adjusted.
 */
static void
clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
{
        struct clk_multiple_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long rate;
        int ret;

        kunit_skip(test, "This needs to be fixed in the core.");

        clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);

        parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);

        ret = clk_set_parent(clk, parent);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(parent);
        clk_put(clk);
}

static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
        KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
        {}
};

/*
 * Test suite for a basic mux clock with two parents. The default parent
 * isn't registered, only the second parent is. By default, the clock
 * will thus be orphan.
 *
 * These tests exercise the behaviour of the consumer API when dealing
 * with an orphan clock, and how we deal with the transition to a valid
 * parent.
 */
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
        .name = "clk-orphan-transparent-multiple-parent-mux-test",
        .init = clk_orphan_transparent_multiple_parent_mux_test_init,
        .exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
        .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
struct clk_single_parent_ctx {
        struct clk_dummy_context parent_ctx;
        struct clk_hw hw;
};

static int clk_single_parent_mux_test_init(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx;
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        test->priv = ctx;

        ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
        ctx->parent_ctx.hw.init =
                CLK_HW_INIT_NO_PARENT("parent-clk",
                                      &clk_dummy_rate_ops,
                                      0);

        ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
        if (ret)
                return ret;

        ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
                                   &clk_dummy_single_parent_ops,
                                   CLK_SET_RATE_PARENT);

        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        return 0;
}

static void
clk_single_parent_mux_test_exit(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;

        clk_hw_unregister(&ctx->hw);
        clk_hw_unregister(&ctx->parent_ctx.hw);
}

/*
 * Test that for a clock with a single parent, clk_get_parent() actually
 * returns the parent.
 */
static void
clk_test_single_parent_mux_get_parent(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);

        KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));

        clk_put(parent);
        clk_put(clk);
}

/*
 * Test that for a clock with a single parent, clk_has_parent() actually
 * reports it as a parent.
 */
static void
clk_test_single_parent_mux_has_parent(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);

        KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));

        clk_put(parent);
        clk_put(clk);
}
/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set disjoint ranges on the parent and then the child,
 * the second call will return an error.
 *
 * FIXME: clk_set_rate_range() only considers the current clock when
 * evaluating whether ranges are disjoint, and not the upstream clocks'
 * ranges.
 */
static void
clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        int ret;

        kunit_skip(test, "This needs to be fixed in the core.");

        parent = clk_get_parent(clk);
        KUNIT_ASSERT_PTR_NE(test, parent, NULL);

        ret = clk_set_rate_range(parent, 1000, 2000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(clk, 3000, 4000);
        KUNIT_EXPECT_LT(test, ret, 0);

        clk_put(clk);
}

/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set disjoint ranges on the child and then the parent,
 * the second call will return an error.
 *
 * FIXME: clk_set_rate_range() only considers the current clock when
 * evaluating whether ranges are disjoint, and not the downstream clocks'
 * ranges.
 */
static void
clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        int ret;

        kunit_skip(test, "This needs to be fixed in the core.");

        parent = clk_get_parent(clk);
        KUNIT_ASSERT_PTR_NE(test, parent, NULL);

        ret = clk_set_rate_range(clk, 1000, 2000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(parent, 3000, 4000);
        KUNIT_EXPECT_LT(test, ret, 0);

        clk_put(clk);
}
/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set a range on the parent and then call
 * clk_round_rate(), the boundaries of the parent are taken into
 * account.
 */
static void
clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long rate;
        int ret;

        parent = clk_get_parent(clk);
        KUNIT_ASSERT_PTR_NE(test, parent, NULL);

        ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(clk);
}

/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set a range on the parent and a more restrictive one on
 * the child, and then call clk_round_rate(), the boundaries of the
 * two clocks are taken into account.
 */
static void
clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long rate;
        int ret;

        parent = clk_get_parent(clk);
        KUNIT_ASSERT_PTR_NE(test, parent, NULL);

        ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

        rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

        clk_put(clk);
}

/*
 * Test that for a clock that can't modify its rate and with a single
 * parent, if we set a range on the child and a more restrictive one on
 * the parent, and then call clk_round_rate(), the boundaries of the
 * two clocks are taken into account.
 */
static void
clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        struct clk *parent;
        unsigned long rate;
        int ret;

        parent = clk_get_parent(clk);
        KUNIT_ASSERT_PTR_NE(test, parent, NULL);

        ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

        rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

        clk_put(clk);
}

static struct kunit_case clk_single_parent_mux_test_cases[] = {
        KUNIT_CASE(clk_test_single_parent_mux_get_parent),
        KUNIT_CASE(clk_test_single_parent_mux_has_parent),
        KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
        KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
        KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
        KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
        KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
        {}
};

/*
 * Test suite for a basic mux clock with one parent, with
 * CLK_SET_RATE_PARENT on the child.
 *
 * These tests exercise the consumer API and check that the state of the
 * child and parent are sane and consistent.
 */
static struct kunit_suite
clk_single_parent_mux_test_suite = {
        .name = "clk-single-parent-mux-test",
        .init = clk_single_parent_mux_test_init,
        .exit = clk_single_parent_mux_test_exit,
        .test_cases = clk_single_parent_mux_test_cases,
};
static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx;
        struct clk_init_data init = { };
        const char * const parents[] = { "orphan_parent" };
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        test->priv = ctx;

        init.name = "test_orphan_dummy_parent";
        init.ops = &clk_dummy_single_parent_ops;
        init.parent_names = parents;
        init.num_parents = ARRAY_SIZE(parents);
        init.flags = CLK_SET_RATE_PARENT;
        ctx->hw.init = &init;

        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        memset(&init, 0, sizeof(init));
        init.name = "orphan_parent";
        init.ops = &clk_dummy_rate_ops;
        ctx->parent_ctx.hw.init = &init;
        ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;

        ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
        if (ret)
                return ret;

        return 0;
}

/*
 * Test that a mux-only clock, with an initial rate within a range,
 * will still have the same rate after the range has been enforced.
 *
 * See:
 * https://lore.kernel.org/linux-clk/[email protected]/
 */
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
        struct clk_single_parent_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate, new_rate;

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate_range(clk,
                                           ctx->parent_ctx.rate - 1000,
                                           ctx->parent_ctx.rate + 1000),
                        0);

        new_rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, new_rate, 0);
        KUNIT_EXPECT_EQ(test, rate, new_rate);

        clk_put(clk);
}

static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
        KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
        {}
};

/*
 * Test suite for a basic mux clock with one parent. The parent is
 * registered after its child. The clock will thus be an orphan when
 * registered, but will no longer be when the tests run.
 *
 * These tests make sure a clock that used to be orphan has a sane,
 * consistent, behaviour.
 */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
        .name = "clk-orphan-transparent-single-parent-test",
        .init = clk_orphan_transparent_single_parent_mux_test_init,
        .exit = clk_single_parent_mux_test_exit,
        .test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
struct clk_single_parent_two_lvl_ctx {
        struct clk_dummy_context parent_parent_ctx;
        struct clk_dummy_context parent_ctx;
        struct clk_hw hw;
};

static int
clk_orphan_two_level_root_last_test_init(struct kunit *test)
{
        struct clk_single_parent_two_lvl_ctx *ctx;
        int ret;

        ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        test->priv = ctx;

        ctx->parent_ctx.hw.init =
                CLK_HW_INIT("intermediate-parent",
                            "root-parent",
                            &clk_dummy_single_parent_ops,
                            CLK_SET_RATE_PARENT);
        ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
        if (ret)
                return ret;

        ctx->hw.init =
                CLK_HW_INIT("test-clk", "intermediate-parent",
                            &clk_dummy_single_parent_ops,
                            CLK_SET_RATE_PARENT);
        ret = clk_hw_register(NULL, &ctx->hw);
        if (ret)
                return ret;

        ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
        ctx->parent_parent_ctx.hw.init =
                CLK_HW_INIT_NO_PARENT("root-parent",
                                      &clk_dummy_rate_ops,
                                      0);
        ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
        if (ret)
                return ret;

        return 0;
}

static void
clk_orphan_two_level_root_last_test_exit(struct kunit *test)
{
        struct clk_single_parent_two_lvl_ctx *ctx = test->priv;

        clk_hw_unregister(&ctx->hw);
        clk_hw_unregister(&ctx->parent_ctx.hw);
        clk_hw_unregister(&ctx->parent_parent_ctx.hw);
}

/*
 * Test that, for a clock whose parent used to be orphan, clk_get_rate()
 * will return the proper rate.
 */
static void
clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
{
        struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        rate = clk_get_rate(clk);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);

        clk_put(clk);
}

/*
 * Test that, for a clock whose parent used to be orphan,
 * clk_set_rate_range() won't affect its rate if it is already within
 * range.
 *
 * See (for Exynos 4210):
 * https://lore.kernel.org/linux-clk/[email protected]/
 */
static void
clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
{
        struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;
        int ret;

        ret = clk_set_rate_range(clk,
                                 DUMMY_CLOCK_INIT_RATE - 1000,
                                 DUMMY_CLOCK_INIT_RATE + 1000);
        KUNIT_ASSERT_EQ(test, ret, 0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);

        clk_put(clk);
}

static struct kunit_case
clk_orphan_two_level_root_last_test_cases[] = {
        KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
        KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
        {}
};

/*
 * Test suite for a basic, transparent, clock with a parent that is also
 * such a clock. The parent's parent is registered last, while the
 * parent and its child are registered in that order. The intermediate
 * and leaf clocks will thus be orphan when registered, but the leaf
 * clock itself will always have its parent and will never be
 * reparented. Indeed, it's only orphan because its parent is.
 *
 * These tests exercise the behaviour of the consumer API when dealing
 * with an orphan clock, and how we deal with the transition to a valid
 * parent.
 */
static struct kunit_suite
clk_orphan_two_level_root_last_test_suite = {
        .name = "clk-orphan-two-level-root-last-test",
        .init = clk_orphan_two_level_root_last_test_init,
        .exit = clk_orphan_two_level_root_last_test_exit,
        .test_cases = clk_orphan_two_level_root_last_test_cases,
};
/*
 * Test that clk_set_rate_range won't return an error for a valid range
 * and that it will make sure the rate of the clock is within the
 * boundaries.
 */
static void clk_range_test_set_range(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);
        unsigned long rate;

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate_range(clk,
                                           DUMMY_CLOCK_RATE_1,
                                           DUMMY_CLOCK_RATE_2),
                        0);

        rate = clk_get_rate(clk);
        KUNIT_ASSERT_GT(test, rate, 0);
        KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
        KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

        clk_put(clk);
}

/*
 * Test that calling clk_set_rate_range with a minimum rate higher than
 * the maximum rate returns an error.
 */
static void clk_range_test_set_range_invalid(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *clk = clk_hw_get_clk(hw, NULL);

        KUNIT_EXPECT_LT(test,
                        clk_set_rate_range(clk,
                                           DUMMY_CLOCK_RATE_1 + 1000,
                                           DUMMY_CLOCK_RATE_1),
                        0);

        clk_put(clk);
}

/*
 * Test that users can't set multiple disjoint ranges that would be
 * impossible to meet.
 */
static void clk_range_test_multiple_disjoints_range(struct kunit *test)
{
        struct clk_dummy_context *ctx = test->priv;
        struct clk_hw *hw = &ctx->hw;
        struct clk *user1, *user2;

        user1 = clk_hw_get_clk(hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

        user2 = clk_hw_get_clk(hw, NULL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

        KUNIT_ASSERT_EQ(test,
                        clk_set_rate_range(user1, 1000, 2000),
                        0);

        KUNIT_EXPECT_LT(test,
                        clk_set_rate_range(user2, 3000, 4000),
                        0);

        clk_put(user2);
        clk_put(user1);
}
/*
 * Test that if our clock has some boundaries and we try to round a rate
 * lower than the minimum, the returned rate will be within range.
 */
static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}

/*
 * Test that if our clock has some boundaries and we try to set a rate
 * lower than the minimum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}

/*
 * Test that if our clock has some boundaries and we try to round and
 * set a rate lower than the minimum, the rate returned by
 * clk_round_rate() will be consistent with the new rate set by
 * clk_set_rate().
 */
static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	long rounded;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rounded, 0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
		0);

	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));

	clk_put(clk);
}

/*
 * Test that if our clock has some boundaries and we try to round a rate
 * higher than the maximum, the returned rate will be within range.
 */
static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}

/*
 * Test that if our clock has some boundaries and we try to set a rate
 * higher than the maximum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}

/*
 * Test that if our clock has some boundaries and we try to round and
 * set a rate higher than the maximum, the rate returned by
 * clk_round_rate() will be consistent with the new rate set by
 * clk_set_rate().
 */
static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	long rounded;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
	KUNIT_ASSERT_GT(test, rounded, 0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
		0);

	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));

	clk_put(clk);
}

/*
 * Test that if our clock has a rate lower than the minimum set by a
 * call to clk_set_rate_range(), the rate will be raised to match the
 * new minimum.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
 */
static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(clk);
}

/*
 * Test that if our clock has a rate higher than the maximum set by a
 * call to clk_set_rate_range(), the rate will be lowered to match the
 * new maximum.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is the case for clk_dummy_rate_ops.
 */
static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}

static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}
};

/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate range API: clk_set_rate_range(),
 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
 */
static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_test_cases,
};

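/*
 * For reference, the consumer-side calls exercised by this suite look
 * roughly like the sketch below (illustrative only; a hypothetical
 * consumer, with error handling omitted):
 *
 *	clk = clk_get(dev, "some-clock");
 *	clk_set_rate_range(clk, min, max);
 *	clk_set_min_rate(clk, new_min);
 *	clk_set_max_rate(clk, new_max);
 *	clk_drop_range(clk);
 *	clk_put(clk);
 */
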
/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), the core will reevaluate whether a new rate is
 * needed each and every time.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2 - 1000),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(clk);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed each and every time.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user1,
			0,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user2,
			0,
			DUMMY_CLOCK_RATE_1),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
		clk_drop_range(user2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user2);
	clk_put(user1);
	clk_put(clk);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user1,
			0,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user2,
			0,
			DUMMY_CLOCK_RATE_1),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user1);
	clk_put(clk);
}

static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{}
};

/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate range API: clk_set_rate_range(),
 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
 * driver that will always try to run at the highest possible rate.
 */
static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_maximize_test_cases,
};

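/*
 * The "maximize" suite above and the "minimize" suites below rely on
 * dummy ops that always pick, respectively, the highest or the lowest
 * rate allowed by the current constraints. Since the core re-evaluates
 * the aggregated range on every clk_set_rate_range()/clk_drop_range()
 * call, and when a user's last reference goes away with clk_put(), the
 * observed rate is expected to track the effective maximum (or minimum)
 * as users come and go.
 */
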
/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), the core will reevaluate whether a new rate is
 * needed each and every time.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
		clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
		0);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1 + 1000,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(clk,
			DUMMY_CLOCK_RATE_1,
			DUMMY_CLOCK_RATE_2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(clk);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed each and every time.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user1,
			DUMMY_CLOCK_RATE_1,
			ULONG_MAX),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user2,
			DUMMY_CLOCK_RATE_2,
			ULONG_MAX),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
		clk_drop_range(user2),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user2);
	clk_put(user1);
	clk_put(clk);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user1,
			DUMMY_CLOCK_RATE_1,
			ULONG_MAX),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
		clk_set_rate_range(user2,
			DUMMY_CLOCK_RATE_2,
			ULONG_MAX),
		0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user1);
	clk_put(clk);
}

static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};

/*
 * Test suite for a basic rate clock, without any parent.
 *
 * These tests exercise the rate range API: clk_set_rate_range(),
 * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
 * driver that will always try to run at the lowest possible rate.
 */
static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};

struct clk_leaf_mux_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk_hw hw;
};

static int
clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
			&clk_dummy_rate_ops,
			0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
			&clk_dummy_rate_ops,
			0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
			&clk_multiple_parents_mux_ops,
			0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
			&clk_dummy_single_parent_ops,
			CLK_SET_RATE_PARENT);
	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}

static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}

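/*
 * Topology registered by clk_leaf_mux_set_rate_parent_test_init(), for
 * reference:
 *
 *	parent-0 (DUMMY_CLOCK_RATE_1) --\
 *	                                 +-- test-mux --- test-clock
 *	parent-1 (DUMMY_CLOCK_RATE_2) --/                 (CLK_SET_RATE_PARENT)
 *
 * test-clock forwards rate requests to test-mux, which then picks
 * whichever of its parents best fits the requested rate.
 */
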
/*
 * Test that, for a clock that will forward any rate request to its
 * parent, the rate request structure returned by __clk_determine_rate
 * is sane and will be what we expect.
 */
static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
{
	struct clk_leaf_mux_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk_rate_request req;
	unsigned long rate;
	int ret;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);

	ret = __clk_determine_rate(hw, &req);
	KUNIT_ASSERT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
	KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);

	clk_put(clk);
}

static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
	KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
	{}
};

/*
 * Test suite for a clock whose parent is a mux with multiple parents.
 * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
 * requests to the mux, which will then select which parent is the best
 * fit for a given rate.
 *
 * These tests exercise the behaviour of muxes, and the proper selection
 * of parents.
 */
static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
	.name = "clk-leaf-mux-set-rate-parent",
	.init = clk_leaf_mux_set_rate_parent_test_init,
	.exit = clk_leaf_mux_set_rate_parent_test_exit,
	.test_cases = clk_leaf_mux_set_rate_parent_test_cases,
};

struct clk_mux_notifier_rate_change {
	bool done;
	unsigned long old_rate;
	unsigned long new_rate;
	wait_queue_head_t wq;
};

struct clk_mux_notifier_ctx {
	struct clk_multiple_parent_ctx mux_ctx;
	struct clk *clk;
	struct notifier_block clk_nb;
	struct clk_mux_notifier_rate_change pre_rate_change;
	struct clk_mux_notifier_rate_change post_rate_change;
};

#define NOTIFIER_TIMEOUT_MS 100

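/*
 * Notifier callback registered on the mux. The clock core invokes it
 * with PRE_RATE_CHANGE or POST_RATE_CHANGE in @action; for each phase
 * the callback records the old and new rates and wakes up the test,
 * which waits on the corresponding waitqueue for up to
 * NOTIFIER_TIMEOUT_MS.
 */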
static int clk_mux_notifier_callback(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct clk_notifier_data *clk_data = data;
	struct clk_mux_notifier_ctx *ctx = container_of(nb,
			struct clk_mux_notifier_ctx,
			clk_nb);

	if (action & PRE_RATE_CHANGE) {
		ctx->pre_rate_change.old_rate = clk_data->old_rate;
		ctx->pre_rate_change.new_rate = clk_data->new_rate;
		ctx->pre_rate_change.done = true;
		wake_up_interruptible(&ctx->pre_rate_change.wq);
	}

	if (action & POST_RATE_CHANGE) {
		ctx->post_rate_change.old_rate = clk_data->old_rate;
		ctx->post_rate_change.new_rate = clk_data->new_rate;
		ctx->post_rate_change.done = true;
		wake_up_interruptible(&ctx->post_rate_change.wq);
	}

	return 0;
}

static int clk_mux_notifier_test_init(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx;
	const char *top_parents[2] = { "parent-0", "parent-1" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;
	ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
	init_waitqueue_head(&ctx->pre_rate_change.wq);
	init_waitqueue_head(&ctx->post_rate_change.wq);

	ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
			&clk_dummy_rate_ops,
			0);
	ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
			&clk_dummy_rate_ops,
			0);
	ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
	ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
	if (ret)
		return ret;

	ctx->mux_ctx.current_parent = 0;
	ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
			&clk_multiple_parents_mux_ops,
			0);
	ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
	if (ret)
		return ret;

	ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
	ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
	if (ret)
		return ret;

	return 0;
}

static void clk_mux_notifier_test_exit(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk *clk = ctx->clk;

	clk_notifier_unregister(clk, &ctx->clk_nb);
	clk_put(clk);

	clk_hw_unregister(&ctx->mux_ctx.hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
	clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
}

/*
 * Test that if we have a notifier registered on a mux, the core will
 * notify us when we switch to another parent, and with the proper old
 * and new rates.
 */
static void clk_mux_notifier_set_parent_test(struct kunit *test)
{
	struct clk_mux_notifier_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->mux_ctx.hw;
	struct clk *clk = clk_hw_get_clk(hw, NULL);
	struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
	int ret;

	ret = clk_set_parent(clk, new_parent);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
			ctx->pre_rate_change.done,
			msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);
	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
			ctx->post_rate_change.done,
			msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
	KUNIT_ASSERT_GT(test, ret, 0);
	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);

	clk_put(new_parent);
	clk_put(clk);
}

static struct kunit_case clk_mux_notifier_test_cases[] = {
	KUNIT_CASE(clk_mux_notifier_set_parent_test),
	{}
};

/*
 * Test suite for a mux with multiple parents, and a notifier registered
 * on the mux.
 *
 * These tests exercise the behaviour of notifiers.
 */
static struct kunit_suite clk_mux_notifier_test_suite = {
	.name = "clk-mux-notifier",
	.init = clk_mux_notifier_test_init,
	.exit = clk_mux_notifier_test_exit,
	.test_cases = clk_mux_notifier_test_cases,
};

kunit_test_suites(
	&clk_leaf_mux_set_rate_parent_test_suite,
	&clk_test_suite,
	&clk_multiple_parents_mux_test_suite,
	&clk_mux_notifier_test_suite,
	&clk_orphan_transparent_multiple_parent_mux_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_orphan_two_level_root_last_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite,
	&clk_single_parent_mux_test_suite,
	&clk_uncached_test_suite
);

MODULE_LICENSE("GPL v2");
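
/*
 * These suites are typically built via the CLK_KUNIT_TEST Kconfig
 * option and run with the KUnit tooling; one possible invocation
 * (the exact command depends on the tree and configuration) would be
 * something along the lines of:
 *
 *	./tools/testing/kunit/kunit.py run 'clk-*'
 */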