core-topology.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <[email protected]>
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"

#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0

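/*
 * The macros above pick fields out of (extended) self-ID packets as laid
 * out in IEEE 1394a.  As an illustration (an editor's example, not taken
 * from a real bus trace), the hypothetical quadlet 0x817f88e4 would decode
 * to:
 *
 *	SELF_ID_PHY_ID()	= 1
 *	SELF_ID_EXTENDED()	= 0	(packet #0, not an extended packet)
 *	SELF_ID_LINK_ON()	= 1
 *	SELF_ID_GAP_COUNT()	= 63
 *	SELF_ID_PHY_SPEED()	= 2	(SCODE_400)
 *	SELF_ID_CONTENDER()	= 1
 *	ports p0..p2		= child, parent, not connected
 *	SELF_ID_PHY_INITIATOR()	= 0
 *	SELF_ID_MORE_PACKETS()	= 0
 *
 * Ports beyond p2 are described by extended self-ID packets (bit 23 set),
 * eight port fields per packet, with SELF_ID_EXT_SEQUENCE() numbering the
 * extended packets.
 */
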
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			fallthrough;
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
			fallthrough;
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */
			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}

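/*
 * Return the status field of port @port_index of the node described by the
 * self-ID packets starting at @sid.  Ports 0..2 live in bits 7-6, 5-4 and
 * 3-2 of packet #0; ports 3 and up are packed eight per extended packet
 * starting at bits 17-16.  The (port_index + 5) arithmetic folds both cases
 * into one quadlet index and bit shift: for example, port_index 3 gives
 * index (3 + 5) / 8 = 1 and shift 16 - ((3 + 5) & 7) * 2 = 16, i.e. the
 * first port field of the first extended self-ID packet.
 */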
static int get_port_type(u32 *sid, int port_index)
{
	int index, shift;

	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) & 7) * 2;
	return (sid[index] >> shift) & 0x03;
}

static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
	if (node == NULL)
		return NULL;

	node->color = color;
	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on = SELF_ID_LINK_ON(sid);
	node->phy_speed = SELF_ID_PHY_SPEED(sid);
	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
	node->port_count = port_count;

	refcount_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}

/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of
 * hops to the furthest leaf.  Computing the max hop count breaks down
 * into two cases: either the path goes through this node, in which
 * case the hop count is the sum of the two biggest child depths plus
 * 2, or the max hop path is entirely contained in a child tree, in
 * which case the max hop count is just the max hop count of that
 * child.
 */
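/*
 * Worked example (editor's illustration): a node with three leaf children,
 * each reporting max_depth = 0 and max_hops = 0.  The two largest child
 * depths are 0 and 0, so this node gets max_depth = 0 + 1 = 1 and
 * max_hops = max(0, 0 + 0 + 2) = 2, which is the leaf - node - leaf path.
 */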
static void update_hop_count(struct fw_node *node)
{
	int depths[2] = { -1, -1 };
	int max_child_hops = 0;
	int i;

	for (i = 0; i < node->port_count; i++) {
		if (node->ports[i] == NULL)
			continue;

		if (node->ports[i]->max_hops > max_child_hops)
			max_child_hops = node->ports[i]->max_hops;

		if (node->ports[i]->max_depth > depths[0]) {
			depths[1] = depths[0];
			depths[0] = node->ports[i]->max_depth;
		} else if (node->ports[i]->max_depth > depths[1])
			depths[1] = node->ports[i]->max_depth;
	}

	node->max_depth = depths[0] + 1;
	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}

static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}

/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card; otherwise it returns NULL.
 */
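/*
 * The construction relies on two properties of the self-ID stream: packets
 * arrive in ascending phy_id order, and every node's children appear before
 * the node itself.  Nodes are therefore kept on a stack; when a node with N
 * child ports is processed, the top N entries of the stack are exactly its
 * children, they are popped off, and the new node is pushed in their place.
 */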
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		q = *sid;
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * Who's your daddy?  We don't know the
				 * parent node at this time, so we
				 * temporarily abuse node->color for
				 * remembering the entry in the
				 * node->ports array where the parent
				 * node should be.  Later, when we
				 * handle the parent node, we fix up
				 * the reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: "
			       "parent_count=%d\n", phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

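		/*
		 * A PHY running at beta speed with more than one active
		 * connection repeats traffic between those ports.  The flag
		 * set below is used by the bus management code to decide
		 * whether the precomputed gap count table (which does not
		 * account for beta repeater delays) can be applied.
		 */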
		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}

typedef void (*fw_node_callback_t)(struct fw_card * card,
				   struct fw_node * node,
				   struct fw_node * parent);

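/*
 * Walk the tree rooted at @root in breadth-first order and invoke @callback
 * on every node together with its parent.  The parent is recognized by its
 * color: the only neighbour already painted with the current card->color is
 * the node that queued us, i.e. the parent; the root gets a NULL parent.
 */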
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}

static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

static void report_found_node(struct fw_card *card,
			      struct fw_node *node, struct fw_node *parent)
{
	int b_path = (node->phy_speed == SCODE_BETA);

	if (parent != NULL) {
		/* min() macro doesn't work here with gcc 3.4 */
		node->max_speed = parent->max_speed < node->phy_speed ?
					parent->max_speed : node->phy_speed;
		node->b_path = parent->b_path && b_path;
	} else {
		node->max_speed = node->phy_speed;
		node->b_path = b_path;
	}

	fw_node_event(card, node, FW_NODE_CREATED);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
	card->color++;
	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
}

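/*
 * Splice the subtree hanging off port @port of @node1 (a node in the new
 * tree) onto the same port of @node0 (the corresponding node in the old
 * tree), and repoint the moved child's back reference from @node1 to @node0.
 */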
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
	struct fw_node *tree;
	int i;

	tree = node1->ports[port];
	node0->ports[port] = tree;
	for (i = 0; i < tree->port_count; i++) {
		if (tree->ports[i] == node1) {
			tree->ports[i] = node0;
			break;
		}
	}
}

/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
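/*
 * The two trees are walked in lockstep: list0 and list1 are seeded with the
 * old and the new local node respectively, and whenever a port is connected
 * in both trees the two peer nodes are queued as a pair, so node0 and node1
 * always describe the same PHY.
 */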
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_DESTROYED callbacks
				 * for them.
				 */
				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more nodes were connected to
				 * this port.  Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}

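/*
 * Regenerate the local TOPOLOGY_MAP register contents from the latest set of
 * self IDs.  Per IEEE 1394, the map consists of a length/CRC quadlet, a
 * generation counter that is incremented on every update, a quadlet holding
 * node_count and self_id_count, and finally the raw self-ID packets; the CRC
 * is filled in by fw_compute_block_crc().
 */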
static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;

	*map++ = cpu_to_be32((self_id_count + 2) << 16);
	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
	*map++ = cpu_to_be32((node_count << 16) | self_id_count);

	while (self_id_count--)
		*map++ = cpu_to_be32p(self_ids++);

	fw_compute_block_crc(card->topology_map);
}

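/*
 * Entry point for the card driver (typically firewire-ohci) once the self-ID
 * buffer for a new bus generation has been received.  Takes card->lock,
 * rebuilds the topology tree and topology map, and emits node events for
 * anything that appeared, disappeared or changed.
 */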
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id = 0xffff;
	card->bm_abdicate = bm_abdicate;
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);