core.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Interconnect framework core driver
  4. *
  5. * Copyright (c) 2017-2019, Linaro Ltd.
  6. * Author: Georgi Djakov <[email protected]>
  7. */
  8. #include <linux/debugfs.h>
  9. #include <linux/device.h>
  10. #include <linux/idr.h>
  11. #include <linux/init.h>
  12. #include <linux/interconnect.h>
  13. #include <linux/interconnect-provider.h>
  14. #include <linux/list.h>
  15. #include <linux/module.h>
  16. #include <linux/mutex.h>
  17. #include <linux/slab.h>
  18. #include <linux/of.h>
  19. #include <linux/overflow.h>
  20. #include "internal.h"
  21. #define CREATE_TRACE_POINTS
  22. #include "trace.h"
static DEFINE_IDR(icc_idr);		/* maps global node id -> struct icc_node */
static LIST_HEAD(icc_providers);	/* all registered interconnect providers */
static int providers_count;
/* While false (boot), initial bandwidth acts as a floor — see aggregate_requests(). */
static bool synced_state;
static DEFINE_MUTEX(icc_lock);		/* protects topology, providers list and request lists */
static struct dentry *icc_debugfs_dir;
  29. static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
  30. {
  31. if (!n)
  32. return;
  33. seq_printf(s, "%-42s %12u %12u\n",
  34. n->name, n->avg_bw, n->peak_bw);
  35. }
  36. static int icc_summary_show(struct seq_file *s, void *data)
  37. {
  38. struct icc_provider *provider;
  39. seq_puts(s, " node tag avg peak\n");
  40. seq_puts(s, "--------------------------------------------------------------------\n");
  41. mutex_lock(&icc_lock);
  42. list_for_each_entry(provider, &icc_providers, provider_list) {
  43. struct icc_node *n;
  44. list_for_each_entry(n, &provider->nodes, node_list) {
  45. struct icc_req *r;
  46. icc_summary_show_one(s, n);
  47. hlist_for_each_entry(r, &n->req_list, req_node) {
  48. u32 avg_bw = 0, peak_bw = 0;
  49. if (!r->dev)
  50. continue;
  51. if (r->enabled) {
  52. avg_bw = r->avg_bw;
  53. peak_bw = r->peak_bw;
  54. }
  55. seq_printf(s, " %-27s %12u %12u %12u\n",
  56. dev_name(r->dev), r->tag, avg_bw, peak_bw);
  57. }
  58. }
  59. }
  60. mutex_unlock(&icc_lock);
  61. return 0;
  62. }
  63. DEFINE_SHOW_ATTRIBUTE(icc_summary);
  64. static void icc_graph_show_link(struct seq_file *s, int level,
  65. struct icc_node *n, struct icc_node *m)
  66. {
  67. seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
  68. level == 2 ? "\t\t" : "\t",
  69. n->id, n->name, m->id, m->name);
  70. }
  71. static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
  72. {
  73. seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
  74. n->id, n->name, n->id, n->name);
  75. seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
  76. seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
  77. seq_puts(s, "\"]\n");
  78. }
/*
 * Dump the whole interconnect topology as a graphviz "digraph": one
 * cluster subgraph per provider containing its nodes and intra-provider
 * edges, then all inter-provider edges at the top level.
 */
static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);
  117. static struct icc_node *node_find(const int id)
  118. {
  119. return idr_find(&icc_idr, id);
  120. }
  121. static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
  122. ssize_t num_nodes)
  123. {
  124. struct icc_node *node = dst;
  125. struct icc_path *path;
  126. int i;
  127. path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
  128. if (!path)
  129. return ERR_PTR(-ENOMEM);
  130. path->num_nodes = num_nodes;
  131. for (i = num_nodes - 1; i >= 0; i--) {
  132. node->provider->users++;
  133. hlist_add_head(&path->reqs[i].req_node, &node->req_list);
  134. path->reqs[i].node = node;
  135. path->reqs[i].dev = dev;
  136. path->reqs[i].enabled = true;
  137. /* reference to previous node was saved during path traversal */
  138. node = node->reverse;
  139. }
  140. return path;
  141. }
/*
 * path_find() - breadth-first search for a path from @src to @dst
 *
 * @traverse_list is the current BFS frontier, @edge_list collects the
 * next frontier, and every node ever enqueued is moved to @visited_list
 * so its ->is_traversed marker can be cleared on exit. Each discovered
 * node records its predecessor in ->reverse, which path_init() follows
 * from @dst back to @src. Caller must hold icc_lock.
 *
 * Returns the new path, ERR_PTR(-EPROBE_DEFER) when no path was found
 * (a provider may not be registered yet), or ERR_PTR(-ENOENT) on a NULL
 * link (a linked-to node whose provider never probed).
 */
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				/* park everything still enqueued for flag cleanup */
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		/* current frontier exhausted: retire it, promote the next one */
		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}
/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	/* start from zero and fold every consumer request back in */
	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		/* a disabled request contributes zero bandwidth */
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* during boot use the initial bandwidth as a floor value */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}
/*
 * apply_constraints() - program the aggregated bandwidth along a path
 * @path: path whose node constraints should be written to the hardware
 *
 * Walks consecutive (prev, next) node pairs of the path and invokes the
 * provider's set() callback for each pair that lives within one provider
 * (or spans providers that opted into ->inter_set handling).
 *
 * Return: 0 on success, or the first set() error. Note that @ret starts
 * as -EINVAL, so a path where no pair ever qualifies reports an error.
 */
static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/* both endpoints should be valid master-slave pairs */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}
  249. int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
  250. u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
  251. {
  252. *agg_avg += avg_bw;
  253. *agg_peak = max(*agg_peak, peak_bw);
  254. return 0;
  255. }
  256. EXPORT_SYMBOL_GPL(icc_std_aggregate);
  257. /* of_icc_xlate_onecell() - Translate function using a single index.
  258. * @spec: OF phandle args to map into an interconnect node.
  259. * @data: private data (pointer to struct icc_onecell_data)
  260. *
  261. * This is a generic translate function that can be used to model simple
  262. * interconnect providers that have one device tree node and provide
  263. * multiple interconnect nodes. A single cell is used as an index into
  264. * an array of icc nodes specified in the icc_onecell_data struct when
  265. * registering the provider.
  266. */
  267. struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
  268. void *data)
  269. {
  270. struct icc_onecell_data *icc_data = data;
  271. unsigned int idx = spec->args[0];
  272. if (idx >= icc_data->num_nodes) {
  273. pr_err("%s: invalid index %u\n", __func__, idx);
  274. return ERR_PTR(-EINVAL);
  275. }
  276. return icc_data->nodes[idx];
  277. }
  278. EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
 * on failure.
 */
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	/* -EPROBE_DEFER by default: the provider may simply not be registered yet */
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			if (provider->xlate_extended) {
				/* extended xlate hands back node plus extra data (e.g. tag) */
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (IS_ERR(node))
		return ERR_CAST(node);

	/* plain xlate path: wrap the bare node in an icc_node_data for the caller */
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);

		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
/* devres destructor: drop the icc_path reference stored by devm_of_icc_get(). */
static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}
  328. struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
  329. {
  330. struct icc_path **ptr, *path;
  331. ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
  332. if (!ptr)
  333. return ERR_PTR(-ENOMEM);
  334. path = of_icc_get(dev, name);
  335. if (!IS_ERR(path)) {
  336. *ptr = path;
  337. devres_add(dev, ptr);
  338. } else {
  339. devres_free(ptr);
  340. }
  341. return path;
  342. }
  343. EXPORT_SYMBOL_GPL(devm_of_icc_get);
/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node do not have "interconnects" property
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * lets support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	/* entries come in (src, dst) pairs: 2*idx is the source endpoint */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	/* ... and 2*idx + 1 is the destination endpoint */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);
	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);
	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	/* a tag is applied only when both endpoints agree on it */
	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		/* NOTE(review): this frees the path while its reqs are still
		 * linked into the nodes' req_lists (see path_init()) — looks
		 * like a latent issue on kasprintf() failure; confirm. */
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
  424. /**
  425. * of_icc_get() - get a path handle from a DT node based on name
  426. * @dev: device pointer for the consumer device
  427. * @name: interconnect path name
  428. *
  429. * This function will search for a path between two endpoints and return an
  430. * icc_path handle on success. Use icc_put() to release constraints when they
  431. * are not needed anymore.
  432. * If the interconnect API is disabled, NULL is returned and the consumer
  433. * drivers will still build. Drivers are free to handle this specifically,
  434. * but they don't have to.
  435. *
  436. * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
  437. * when the API is disabled or the "interconnects" DT property is missing.
  438. */
  439. struct icc_path *of_icc_get(struct device *dev, const char *name)
  440. {
  441. struct device_node *np;
  442. int idx = 0;
  443. if (!dev || !dev->of_node)
  444. return ERR_PTR(-ENODEV);
  445. np = dev->of_node;
  446. /*
  447. * When the consumer DT node do not have "interconnects" property
  448. * return a NULL path to skip setting constraints.
  449. */
  450. if (!of_find_property(np, "interconnects", NULL))
  451. return NULL;
  452. /*
  453. * We use a combination of phandle and specifier for endpoint. For now
  454. * lets support only global ids and extend this in the future if needed
  455. * without breaking DT compatibility.
  456. */
  457. if (name) {
  458. idx = of_property_match_string(np, "interconnect-names", name);
  459. if (idx < 0)
  460. return ERR_PTR(idx);
  461. }
  462. return of_icc_get_by_index(dev, idx);
  463. }
  464. EXPORT_SYMBOL_GPL(of_icc_get);
  465. /**
  466. * icc_set_tag() - set an optional tag on a path
  467. * @path: the path we want to tag
  468. * @tag: the tag value
  469. *
  470. * This function allows consumers to append a tag to the requests associated
  471. * with a path, so that a different aggregation could be done based on this tag.
  472. */
  473. void icc_set_tag(struct icc_path *path, u32 tag)
  474. {
  475. int i;
  476. if (!path)
  477. return;
  478. mutex_lock(&icc_lock);
  479. for (i = 0; i < path->num_nodes; i++)
  480. path->reqs[i].tag = tag;
  481. mutex_unlock(&icc_lock);
  482. }
  483. EXPORT_SYMBOL_GPL(icc_set_tag);
  484. /**
  485. * icc_get_name() - Get name of the icc path
  486. * @path: reference to the path returned by icc_get()
  487. *
  488. * This function is used by an interconnect consumer to get the name of the icc
  489. * path.
  490. *
  491. * Returns a valid pointer on success, or NULL otherwise.
  492. */
  493. const char *icc_get_name(struct icc_path *path)
  494. {
  495. if (!path)
  496. return NULL;
  497. return path->name;
  498. }
  499. EXPORT_SYMBOL_GPL(icc_get_name);
/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT properties is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	/* every req on a path carries the same values, so [0] suffices for rollback */
	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		/* roll back: restore the previous votes and re-program the hardware */
		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
  554. static int __icc_enable(struct icc_path *path, bool enable)
  555. {
  556. int i;
  557. if (!path)
  558. return 0;
  559. if (WARN_ON(IS_ERR(path) || !path->num_nodes))
  560. return -EINVAL;
  561. mutex_lock(&icc_lock);
  562. for (i = 0; i < path->num_nodes; i++)
  563. path->reqs[i].enabled = enable;
  564. mutex_unlock(&icc_lock);
  565. return icc_set_bw(path, path->reqs[0].avg_bw,
  566. path->reqs[0].peak_bw);
  567. }
/* icc_enable() - re-enable all bandwidth requests on @path (NULL is a no-op). */
int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);
/* icc_disable() - disable all bandwidth requests on @path (NULL is a no-op). */
int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	/* default -EPROBE_DEFER: an endpoint's provider may not be registered yet */
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);
/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	/* drop this consumer's bandwidth votes and re-aggregate each node */
	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		/* each request pinned its provider in path_init() */
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
  649. static struct icc_node *icc_node_create_nolock(int id)
  650. {
  651. struct icc_node *node;
  652. /* check if node already exists */
  653. node = node_find(id);
  654. if (node)
  655. return node;
  656. node = kzalloc(sizeof(*node), GFP_KERNEL);
  657. if (!node)
  658. return ERR_PTR(-ENOMEM);
  659. id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
  660. if (id < 0) {
  661. WARN(1, "%s: couldn't get idr\n", __func__);
  662. kfree(node);
  663. return ERR_PTR(id);
  664. }
  665. node->id = id;
  666. return node;
  667. }
/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Locked wrapper around icc_node_create_nolock(): returns the existing
 * node for @id if there is one, otherwise allocates and registers it.
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);
  683. /**
  684. * icc_node_destroy() - destroy a node
  685. * @id: node id
  686. */
  687. void icc_node_destroy(int id)
  688. {
  689. struct icc_node *node;
  690. mutex_lock(&icc_lock);
  691. node = node_find(id);
  692. if (node) {
  693. idr_remove(&icc_idr, node->id);
  694. WARN_ON(!hlist_empty(&node->req_list));
  695. }
  696. mutex_unlock(&icc_lock);
  697. if (!node)
  698. return;
  699. kfree(node->links);
  700. kfree(node);
  701. }
  702. EXPORT_SYMBOL_GPL(icc_node_destroy);
  703. /**
  704. * icc_link_create() - create a link between two nodes
  705. * @node: source node id
  706. * @dst_id: destination node id
  707. *
  708. * Create a link between two nodes. The nodes might belong to different
  709. * interconnect providers and the @dst_id node might not exist (if the
  710. * provider driver has not probed yet). So just create the @dst_id node
  711. * and when the actual provider driver is probed, the rest of the node
  712. * data is filled.
  713. *
  714. * Return: 0 on success, or an error code otherwise
  715. */
  716. int icc_link_create(struct icc_node *node, const int dst_id)
  717. {
  718. struct icc_node *dst;
  719. struct icc_node **new;
  720. int ret = 0;
  721. if (!node->provider)
  722. return -EINVAL;
  723. mutex_lock(&icc_lock);
  724. dst = node_find(dst_id);
  725. if (!dst) {
  726. dst = icc_node_create_nolock(dst_id);
  727. if (IS_ERR(dst)) {
  728. ret = PTR_ERR(dst);
  729. goto out;
  730. }
  731. }
  732. new = krealloc(node->links,
  733. (node->num_links + 1) * sizeof(*node->links),
  734. GFP_KERNEL);
  735. if (!new) {
  736. ret = -ENOMEM;
  737. goto out;
  738. }
  739. node->links = new;
  740. node->links[node->num_links++] = dst;
  741. out:
  742. mutex_unlock(&icc_lock);
  743. return ret;
  744. }
  745. EXPORT_SYMBOL_GPL(icc_link_create);
/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	/* locate @dst in @src's links array */
	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	/* order is not preserved: fill the hole with the last entry */
	src->links[slot] = src->links[--src->num_links];

	/* shrink the array; the old buffer stays valid if krealloc() fails */
	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;
	else
		ret = -ENOMEM;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);
/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 *
 * Attaches @node to @provider's node list and synchronizes the node's
 * initial bandwidth with the hardware: queried via ->get_bw() when
 * available, otherwise assumed maximal (INT_MAX) so nothing is throttled
 * before consumers have voted.
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		/* no way to query: assume full bandwidth until sync_state */
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	if (provider->pre_aggregate)
		provider->pre_aggregate(node);

	if (provider->aggregate)
		provider->aggregate(node, 0, node->init_avg, node->init_peak,
				    &node->avg_bw, &node->peak_bw);

	/* program the initial values into the hardware */
	provider->set(node, node);

	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
  814. /**
  815. * icc_node_del() - delete interconnect node from interconnect provider
  816. * @node: pointer to the interconnect node
  817. */
  818. void icc_node_del(struct icc_node *node)
  819. {
  820. mutex_lock(&icc_lock);
  821. list_del(&node->node_list);
  822. mutex_unlock(&icc_lock);
  823. }
  824. EXPORT_SYMBOL_GPL(icc_node_del);
  825. /**
  826. * icc_nodes_remove() - remove all previously added nodes from provider
  827. * @provider: the interconnect provider we are removing nodes from
  828. *
  829. * Return: 0 on success, or an error code otherwise
  830. */
  831. int icc_nodes_remove(struct icc_provider *provider)
  832. {
  833. struct icc_node *n, *tmp;
  834. if (WARN_ON(IS_ERR_OR_NULL(provider)))
  835. return -EINVAL;
  836. list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
  837. icc_node_del(n);
  838. icc_node_destroy(n->id);
  839. }
  840. return 0;
  841. }
  842. EXPORT_SYMBOL_GPL(icc_nodes_remove);
/**
 * icc_provider_init() - initialize a new interconnect provider
 * @provider: the interconnect provider to initialize
 *
 * Must be called before adding nodes to the provider.
 */
void icc_provider_init(struct icc_provider *provider)
{
	/* A provider without a ->set callback cannot apply bandwidth. */
	WARN_ON(!provider->set);

	INIT_LIST_HEAD(&provider->nodes);
}
EXPORT_SYMBOL_GPL(icc_provider_init);
/**
 * icc_provider_register() - register a new interconnect provider
 * @provider: the interconnect provider to register
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_register(struct icc_provider *provider)
{
	/* Consumers resolve nodes through one of the xlate callbacks. */
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);
	list_add_tail(&provider->provider_list, &icc_providers);
	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider registered\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_register);
/**
 * icc_provider_deregister() - deregister an interconnect provider
 * @provider: the interconnect provider to deregister
 */
void icc_provider_deregister(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	/* Deregistering while consumers still hold paths is a driver bug. */
	WARN_ON(provider->users);
	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_deregister);
/**
 * icc_provider_add() - initialize and register an interconnect provider
 * @provider: the interconnect provider to add
 *
 * Convenience wrapper combining icc_provider_init() and
 * icc_provider_register().
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
	icc_provider_init(provider);

	return icc_provider_register(provider);
}
EXPORT_SYMBOL_GPL(icc_provider_add);
/**
 * icc_provider_del() - deregister an interconnect provider
 * @provider: the interconnect provider to remove
 *
 * All of the provider's nodes must already have been removed
 * (see icc_nodes_remove()).
 */
void icc_provider_del(struct icc_provider *provider)
{
	WARN_ON(!list_empty(&provider->nodes));

	icc_provider_deregister(provider);
}
EXPORT_SYMBOL_GPL(icc_provider_del);
/*
 * Device-tree nodes that declare "#interconnect-cells" but must not be
 * counted as providers by of_count_icc_providers() — presumably virtual
 * devices with no driver that would ever reach icc_sync_state(); confirm
 * against the corresponding platform device trees.
 */
static const struct of_device_id __maybe_unused ignore_list[] = {
	{ .compatible = "qcom,sc7180-ipa-virt" },
	{ .compatible = "qcom,sdx55-ipa-virt" },
	{ .compatible = "qcom,sm8150-ipa-virt" },
	{}
};
  902. static int of_count_icc_providers(struct device_node *np)
  903. {
  904. struct device_node *child;
  905. int count = 0;
  906. for_each_available_child_of_node(np, child) {
  907. if (of_property_read_bool(child, "#interconnect-cells") &&
  908. likely(!of_match_node(ignore_list, child)))
  909. count++;
  910. count += of_count_icc_providers(child);
  911. }
  912. return count;
  913. }
/*
 * icc_sync_state() - sync_state handler shared by interconnect providers.
 * @dev: the provider device that just reached sync_state
 *
 * Called once per provider. Only after every provider counted at boot
 * (providers_count) has reached sync_state do we drop the boot-time
 * bandwidth floor and program the hardware with real consumer requests.
 */
void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	static int count;	/* how many providers have synced so far */

	count++;

	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			/* Only touch nodes still holding an initial floor. */
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				/* Re-aggregate consumer requests only. */
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
  938. static int __init icc_init(void)
  939. {
  940. struct device_node *root = of_find_node_by_path("/");
  941. providers_count = of_count_icc_providers(root);
  942. of_node_put(root);
  943. icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
  944. debugfs_create_file("interconnect_summary", 0444,
  945. icc_debugfs_dir, NULL, &icc_summary_fops);
  946. debugfs_create_file("interconnect_graph", 0444,
  947. icc_debugfs_dir, NULL, &icc_graph_fops);
  948. return 0;
  949. }
  950. device_initcall(icc_init);
/* Module metadata. */
MODULE_AUTHOR("Georgi Djakov <[email protected]>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");