/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2018, Intel Corporation
 */

#ifndef TB_H_
#define TB_H_

#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/bitfield.h>

#include "tb_regs.h"
#include "ctl.h"
#include "dma_port.h"

#define NVM_MIN_SIZE		SZ_32K
#define NVM_MAX_SIZE		SZ_512K
#define NVM_DATA_DWORDS		16

/* Keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX				BIT(1)
/**
 * struct tb_nvm - Structure holding NVM information
 * @dev: Owner of the NVM
 * @major: Major version number of the active NVM portion
 * @minor: Minor version number of the active NVM portion
 * @id: Identifier used with both NVM portions
 * @active: Active portion NVMem device
 * @active_size: Size in bytes of the active NVM
 * @non_active: Non-active portion NVMem device
 * @buf: Buffer where the NVM image is stored before it is written to
 *	 the actual NVM flash device
 * @buf_data_start: Where the actual image starts after skipping
 *		    possible headers
 * @buf_data_size: Number of bytes actually consumed by the new NVM
 *		   image
 * @authenticating: The device is authenticating the new NVM
 * @flushed: The image has been flushed to the storage area
 * @vops: Router vendor specific NVM operations (optional)
 *
 * The user of this structure needs to handle serialization of possible
 * concurrent access.
 */
struct tb_nvm {
	struct device *dev;
	u32 major;
	u32 minor;
	int id;
	struct nvmem_device *active;
	size_t active_size;
	struct nvmem_device *non_active;
	void *buf;
	void *buf_data_start;
	size_t buf_data_size;
	bool authenticating;
	bool flushed;
	const struct tb_nvm_vendor_ops *vops;
};

enum tb_nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
	AUTHENTICATE_ONLY = 3,
};

#define TB_SWITCH_KEY_SIZE		32
#define TB_SWITCH_MAX_DEPTH		6
#define USB4_SWITCH_MAX_DEPTH		5
/**
 * enum tb_switch_tmu_rate - TMU refresh rate
 * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
 * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
 *			     transmission of the Delay Request TSNOS
 *			     (Time Sync Notification Ordered Set) on a Link
 * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
 *			       transmission of the Delay Request TSNOS on
 *			       a Link
 */
enum tb_switch_tmu_rate {
	TB_SWITCH_TMU_RATE_OFF = 0,
	TB_SWITCH_TMU_RATE_HIFI = 16,
	TB_SWITCH_TMU_RATE_NORMAL = 1000,
};
/**
 * struct tb_switch_tmu - Structure holding switch TMU configuration
 * @cap: Offset to the TMU capability (%0 if not found)
 * @has_ucap: Does the switch support uni-directional mode
 * @rate: TMU refresh rate related to upstream switch. In case of root
 *	  switch this holds the domain rate. Reflects the HW setting.
 * @unidirectional: Is the TMU in uni-directional or bi-directional mode
 *		    related to upstream switch. Don't care for root switch.
 *		    Reflects the HW setting.
 * @unidirectional_request: New TMU mode (uni-directional or bi-directional)
 *			    that is requested to be set. Related to upstream
 *			    switch. Don't care for root switch.
 * @rate_request: New TMU refresh rate related to upstream switch that is
 *		  requested to be set. In case of root switch, this holds
 *		  the new domain rate that is requested to be set.
 */
struct tb_switch_tmu {
	int cap;
	bool has_ucap;
	enum tb_switch_tmu_rate rate;
	bool unidirectional;
	bool unidirectional_request;
	enum tb_switch_tmu_rate rate_request;
};
enum tb_clx {
	TB_CLX_DISABLE,
	/* CL0s and CL1 are enabled and supported together */
	TB_CL1 = BIT(0),
	TB_CL2 = BIT(1),
};
/**
 * struct tb_switch - a thunderbolt switch
 * @dev: Device for the switch
 * @config: Switch configuration
 * @ports: Ports in this switch
 * @dma_port: If the switch has a port supporting DMA configuration based
 *	      mailbox this will hold the pointer to that (%NULL
 *	      otherwise). If set it also means the switch has
 *	      upgradeable NVM.
 * @tmu: The switch TMU configuration
 * @tb: Pointer to the domain the switch belongs to
 * @uid: Unique ID of the switch
 * @uuid: UUID of the switch (or %NULL if not supported)
 * @vendor: Vendor ID of the switch
 * @device: Device ID of the switch
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @link_usb4: Upstream link is USB4
 * @generation: Switch Thunderbolt generation
 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
 * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
 * @cap_lc: Offset to the link controller capability (%0 if not found)
 * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
 * @is_unplugged: The switch is going away
 * @drom: DROM of the switch (%NULL if not found)
 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this switch
 * @safe_mode: The switch is in safe-mode
 * @boot: Whether the switch was already authorized on boot or not
 * @rpm: The switch supports runtime PM
 * @authorized: Whether the switch is authorized by user or policy
 * @security_level: Switch supported security level
 * @debugfs_dir: Pointer to the debugfs structure
 * @key: Contains the key used to challenge the device or %NULL if not
 *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
 * @connection_id: Connection ID used with ICM messaging
 * @connection_key: Connection key used with ICM messaging
 * @link: Root switch link this switch is connected to (ICM only)
 * @depth: Depth in the chain at which this switch is connected (ICM only)
 * @rpm_complete: Completion used to wait for runtime resume to
 *		  complete (ICM only)
 * @quirks: Quirks used for this Thunderbolt switch
 * @credit_allocation: Are the below buffer allocation parameters valid
 * @max_usb3_credits: Router preferred number of buffers for USB 3.x
 * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX
 * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
 * @max_pcie_credits: Router preferred number of buffers for PCIe
 * @max_dma_credits: Router preferred number of buffers for DMA/P2P
 * @clx: CLx state on the upstream link of the router
 *
 * When the switch is being added to or removed from the domain (other
 * switches) you need to hold the domain lock.
 *
 * In USB4 terminology this structure represents a router.
 */
struct tb_switch {
	struct device dev;
	struct tb_regs_switch_header config;
	struct tb_port *ports;
	struct tb_dma_port *dma_port;
	struct tb_switch_tmu tmu;
	struct tb *tb;
	u64 uid;
	uuid_t *uuid;
	u16 vendor;
	u16 device;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool link_usb4;
	unsigned int generation;
	int cap_plug_events;
	int cap_vsec_tmu;
	int cap_lc;
	int cap_lp;
	bool is_unplugged;
	u8 *drom;
	struct tb_nvm *nvm;
	bool no_nvm_upgrade;
	bool safe_mode;
	bool boot;
	bool rpm;
	unsigned int authorized;
	enum tb_security_level security_level;
	struct dentry *debugfs_dir;
	u8 *key;
	u8 connection_id;
	u8 connection_key;
	u8 link;
	u8 depth;
	struct completion rpm_complete;
	unsigned long quirks;
	bool credit_allocation;
	unsigned int max_usb3_credits;
	unsigned int min_dp_aux_credits;
	unsigned int min_dp_main_credits;
	unsigned int max_pcie_credits;
	unsigned int max_dma_credits;
	enum tb_clx clx;
};
/**
 * struct tb_port - a thunderbolt port, part of a tb_switch
 * @config: Cached port configuration read from registers
 * @sw: Switch the port belongs to
 * @remote: Remote port (%NULL if not connected)
 * @xdomain: Remote host (%NULL if not connected)
 * @cap_phy: Offset, zero if not found
 * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
 * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
 * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0)
 * @port: Port number on switch
 * @disabled: Disabled by eeprom or enabled but not implemented
 * @bonded: true if the port is bonded (two lanes combined as one)
 * @dual_link_port: If the switch is connected using two ports, points
 *		    to the other port.
 * @link_nr: Is this primary or secondary port on the dual_link.
 * @in_hopids: Currently allocated input HopIDs
 * @out_hopids: Currently allocated output HopIDs
 * @list: Used to link ports to DP resources list
 * @total_credits: Total number of buffers available for this port
 * @ctl_credits: Buffers reserved for control path
 * @dma_credits: Number of credits allocated for DMA tunneling for all
 *		 DMA paths through this port.
 * @max_bw: Maximum possible bandwidth through this adapter if set to
 *	    non-zero.
 *
 * In USB4 terminology this structure represents an adapter (protocol or
 * lane adapter).
 */
struct tb_port {
	struct tb_regs_port_header config;
	struct tb_switch *sw;
	struct tb_port *remote;
	struct tb_xdomain *xdomain;
	int cap_phy;
	int cap_tmu;
	int cap_adap;
	int cap_usb4;
	struct usb4_port *usb4;
	u8 port;
	bool disabled;
	bool bonded;
	struct tb_port *dual_link_port;
	u8 link_nr:1;
	struct ida in_hopids;
	struct ida out_hopids;
	struct list_head list;
	unsigned int total_credits;
	unsigned int ctl_credits;
	unsigned int dma_credits;
	unsigned int max_bw;
};
/**
 * struct usb4_port - USB4 port device
 * @dev: Device for the port
 * @port: Pointer to the lane 0 adapter
 * @can_offline: Does the port have necessary platform support to move
 *		 it into offline mode and back
 * @offline: The port is currently in offline mode
 * @margining: Pointer to margining structure if enabled
 */
struct usb4_port {
	struct device dev;
	struct tb_port *port;
	bool can_offline;
	bool offline;
#ifdef CONFIG_USB4_DEBUGFS_MARGINING
	struct tb_margining *margining;
#endif
};
/**
 * struct tb_retimer - Thunderbolt retimer
 * @dev: Device for the retimer
 * @tb: Pointer to the domain the retimer belongs to
 * @index: Retimer index facing the router USB4 port
 * @vendor: Vendor ID of the retimer
 * @device: Device ID of the retimer
 * @port: Pointer to the lane 0 adapter
 * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
 * @no_nvm_upgrade: Prevent NVM upgrade of this retimer
 * @auth_status: Status of last NVM authentication
 */
struct tb_retimer {
	struct device dev;
	struct tb *tb;
	u8 index;
	u32 vendor;
	u32 device;
	struct tb_port *port;
	struct tb_nvm *nvm;
	bool no_nvm_upgrade;
	u32 auth_status;
};
/**
 * struct tb_path_hop - routing information for a tb_path
 * @in_port: Ingress port of a switch
 * @out_port: Egress port of a switch where the packet is routed out
 *	      (must be on the same switch as @in_port)
 * @in_hop_index: HopID where the path configuration entry is placed in
 *		  the path config space of @in_port.
 * @in_counter_index: Used counter index (not used in the driver
 *		      currently, %-1 to disable)
 * @next_hop_index: HopID of the packet when it is routed out from @out_port
 * @initial_credits: Number of initial flow control credits allocated for
 *		     the path
 * @nfc_credits: Number of non-flow controlled buffers allocated for
 *		 @in_port.
 *
 * Hop configuration is always done on the IN port of a switch.
 * in_port and out_port have to be on the same switch. Packets arriving on
 * in_port with "hop" = in_hop_index will get routed out through out_port.
 * The next hop to take (on out_port->remote) is determined by
 * next_hop_index. When routing packets to another switch (out->remote is
 * set) the @next_hop_index must match the @in_hop_index of that next
 * hop to make routing possible.
 *
 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
 * port.
 */
struct tb_path_hop {
	struct tb_port *in_port;
	struct tb_port *out_port;
	int in_hop_index;
	int in_counter_index;
	int next_hop_index;
	unsigned int initial_credits;
	unsigned int nfc_credits;
};
/**
 * enum tb_path_port - path options mask
 * @TB_PATH_NONE: Do not activate on any hop on path
 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
 * @TB_PATH_ALL: Activate on all hops on the path
 */
enum tb_path_port {
	TB_PATH_NONE = 0,
	TB_PATH_SOURCE = 1,
	TB_PATH_INTERNAL = 2,
	TB_PATH_DESTINATION = 4,
	TB_PATH_ALL = 7,
};
/**
 * struct tb_path - a unidirectional path between two ports
 * @tb: Pointer to the domain structure
 * @name: Name of the path (used for debugging)
 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
 * @egress_shared_buffer: Shared buffering used for egress ports on the path
 * @ingress_fc_enable: Flow control for ingress ports on the path
 * @egress_fc_enable: Flow control for egress ports on the path
 * @priority: Priority group of the path
 * @weight: Weight of the path inside the priority group
 * @drop_packages: Drop packages from queue tail or head
 * @activated: Is the path active
 * @clear_fc: Clear all flow control from the path config space entries
 *	      when deactivating this path
 * @hops: Path hops
 * @path_length: How many hops the path uses
 * @alloc_hopid: Does this path consume port HopID
 *
 * A path consists of a number of hops (see &struct tb_path_hop). To
 * establish a PCIe tunnel two paths have to be created between the two
 * PCIe ports.
 */
struct tb_path {
	struct tb *tb;
	const char *name;
	enum tb_path_port ingress_shared_buffer;
	enum tb_path_port egress_shared_buffer;
	enum tb_path_port ingress_fc_enable;
	enum tb_path_port egress_fc_enable;
	unsigned int priority:3;
	int weight:4;
	bool drop_packages;
	bool activated;
	bool clear_fc;
	struct tb_path_hop *hops;
	int path_length;
	bool alloc_hopid;
};
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define TB_PATH_MIN_HOPID	8
/*
 * Support paths from the farthest (depth 6) router to the host and back
 * to the same level (not necessarily to the same router).
 */
#define TB_PATH_MAX_HOPS	(7 * 2)

/* Possible wake types */
#define TB_WAKE_ON_CONNECT	BIT(0)
#define TB_WAKE_ON_DISCONNECT	BIT(1)
#define TB_WAKE_ON_USB4		BIT(2)
#define TB_WAKE_ON_USB3		BIT(3)
#define TB_WAKE_ON_PCIE		BIT(4)
#define TB_WAKE_ON_DP		BIT(5)
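
/*
 * Usage sketch (editorial, not from the original file): the wake types
 * above form a bitmask, so a caller arming wakes before sleep could
 * combine them, e.g.:
 *
 *	ret = tb_lc_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
 *
 * @sw and the error handling are assumed to come from the caller.
 */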
/**
 * struct tb_cm_ops - Connection manager specific operations vector
 * @driver_ready: Called right after control channel is started. Used by
 *		  ICM to send driver ready message to the firmware.
 * @start: Starts the domain
 * @stop: Stops the domain
 * @suspend_noirq: Connection manager specific suspend_noirq
 * @resume_noirq: Connection manager specific resume_noirq
 * @suspend: Connection manager specific suspend
 * @freeze_noirq: Connection manager specific freeze_noirq
 * @thaw_noirq: Connection manager specific thaw_noirq
 * @complete: Connection manager specific complete
 * @runtime_suspend: Connection manager specific runtime_suspend
 * @runtime_resume: Connection manager specific runtime_resume
 * @runtime_suspend_switch: Runtime suspend a switch
 * @runtime_resume_switch: Runtime resume a switch
 * @handle_event: Handle thunderbolt event
 * @get_boot_acl: Get boot ACL list
 * @set_boot_acl: Set boot ACL list
 * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel)
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
 * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
 * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
 * @usb4_switch_op: Optional proxy for USB4 router operations. If set
 *		    this will be called whenever USB4 router operation is
 *		    performed. If this returns %-EOPNOTSUPP then the
 *		    native USB4 router operation is called.
 * @usb4_switch_nvm_authenticate_status: Optional callback that the CM
 *					 implementation can use to return
 *					 the status of the USB4 NVM_AUTH
 *					 router operation.
 */
struct tb_cm_ops {
	int (*driver_ready)(struct tb *tb);
	int (*start)(struct tb *tb);
	void (*stop)(struct tb *tb);
	int (*suspend_noirq)(struct tb *tb);
	int (*resume_noirq)(struct tb *tb);
	int (*suspend)(struct tb *tb);
	int (*freeze_noirq)(struct tb *tb);
	int (*thaw_noirq)(struct tb *tb);
	void (*complete)(struct tb *tb);
	int (*runtime_suspend)(struct tb *tb);
	int (*runtime_resume)(struct tb *tb);
	int (*runtime_suspend_switch)(struct tb_switch *sw);
	int (*runtime_resume_switch)(struct tb_switch *sw);
	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
			     const void *buf, size_t size);
	int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
	int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
	int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw);
	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
				    const u8 *challenge, u8 *response);
	int (*disconnect_pcie_paths)(struct tb *tb);
	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
				     int transmit_path, int transmit_ring,
				     int receive_path, int receive_ring);
	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring);
	int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
			      u8 *status, const void *tx_data, size_t tx_data_len,
			      void *rx_data, size_t rx_data_len);
	int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw,
						   u32 *status);
};
static inline void *tb_priv(struct tb *tb)
{
	return (void *)tb->privdata;
}
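
/*
 * Usage sketch (editorial, not from the original file): a connection
 * manager allocates its private state together with the domain and
 * reaches it through tb_priv(). "struct my_cm" and the timeout value
 * here are hypothetical.
 *
 *	struct my_cm *mcm;
 *	struct tb *tb;
 *
 *	tb = tb_domain_alloc(nhi, 100, sizeof(struct my_cm));
 *	if (!tb)
 *		return NULL;
 *	mcm = tb_priv(tb);
 */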
#define TB_AUTOSUSPEND_DELAY		15000 /* ms */

/* helper functions & macros */

/**
 * tb_upstream_port() - return the upstream port of a switch
 *
 * Every switch has an upstream port (for the root switch it is the NHI).
 *
 * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
 * non root switches (on the NHI port remote is always NULL).
 *
 * Return: Returns the upstream port of the switch.
 */
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
	return &sw->ports[sw->config.upstream_port_number];
}
/**
 * tb_is_upstream_port() - Is the port upstream facing
 * @port: Port to check
 *
 * Returns true if @port is an upstream facing port. In case of dual
 * link ports both lanes return true.
 */
static inline bool tb_is_upstream_port(const struct tb_port *port)
{
	const struct tb_port *upstream_port = tb_upstream_port(port->sw);

	return port == upstream_port || port->dual_link_port == upstream_port;
}
static inline u64 tb_route(const struct tb_switch *sw)
{
	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}
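
/*
 * Note (editorial): the route is a string of port numbers, one byte
 * per depth level: byte 0 is the downstream port of the root switch,
 * byte 1 the downstream port of the depth 1 switch and so on (see
 * tb_port_at() below and tb_downstream_route() later in this file).
 */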
static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
{
	u8 port;

	port = route >> (sw->config.depth * 8);
	if (WARN_ON(port > sw->config.max_port_number))
		return NULL;
	return &sw->ports[port];
}
/**
 * tb_port_has_remote() - Does the port have switch connected downstream
 * @port: Port to check
 *
 * Returns true only when the port is the primary port and has a remote
 * set.
 */
static inline bool tb_port_has_remote(const struct tb_port *port)
{
	if (tb_is_upstream_port(port))
		return false;
	if (!port->remote)
		return false;
	if (port->dual_link_port && port->link_nr)
		return false;

	return true;
}
static inline bool tb_port_is_null(const struct tb_port *port)
{
	return port && port->port && port->config.type == TB_TYPE_PORT;
}

static inline bool tb_port_is_nhi(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_NHI;
}

static inline bool tb_port_is_pcie_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_DOWN;
}

static inline bool tb_port_is_pcie_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_PCIE_UP;
}

static inline bool tb_port_is_dpin(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_IN;
}

static inline bool tb_port_is_dpout(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
}

static inline bool tb_port_is_usb3_down(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_DOWN;
}

static inline bool tb_port_is_usb3_up(const struct tb_port *port)
{
	return port && port->config.type == TB_TYPE_USB3_UP;
}
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
			     enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(sw->tb->ctl,
			   buffer,
			   tb_route(sw),
			   0,
			   space,
			   offset,
			   length);
}

static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
			      enum tb_cfg_space space, u32 offset, u32 length)
{
	if (sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(sw->tb->ctl,
			    buffer,
			    tb_route(sw),
			    0,
			    space,
			    offset,
			    length);
}

static inline int tb_port_read(struct tb_port *port, void *buffer,
			       enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_read(port->sw->tb->ctl,
			   buffer,
			   tb_route(port->sw),
			   port->port,
			   space,
			   offset,
			   length);
}

static inline int tb_port_write(struct tb_port *port, const void *buffer,
				enum tb_cfg_space space, u32 offset, u32 length)
{
	if (port->sw->is_unplugged)
		return -ENODEV;
	return tb_cfg_write(port->sw->tb->ctl,
			    buffer,
			    tb_route(port->sw),
			    port->port,
			    space,
			    offset,
			    length);
}
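
/*
 * Usage sketch (editorial, not from the original file): reading one
 * dword from an adapter config space with the helpers above. The
 * offset %5 is a placeholder; real callers use offsets from tb_regs.h,
 * and TB_CFG_PORT is assumed to come from enum tb_cfg_space in ctl.h.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_port_read(port, &val, TB_CFG_PORT, 5, 1);
 *	if (ret)
 *		return ret;
 */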
#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)

#define __TB_SW_PRINT(level, sw, fmt, arg...)		\
	do {						\
		const struct tb_switch *__sw = (sw);	\
		level(__sw->tb, "%llx: " fmt,		\
		      tb_route(__sw), ## arg);		\
	} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)

#define __TB_PORT_PRINT(level, _port, fmt, arg...)			\
	do {								\
		const struct tb_port *__port = (_port);			\
		level(__port->sw->tb, "%llx:%u: " fmt,			\
		      tb_route(__port->sw), __port->port, ## arg);	\
	} while (0)
#define tb_port_WARN(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
#define tb_port_dbg(port, fmt, arg...) \
	__TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);

extern struct device_type tb_domain_type;
extern struct device_type tb_retimer_type;
extern struct device_type tb_switch_type;
extern struct device_type usb4_port_device_type;

int tb_domain_init(void);
void tb_domain_exit(void);
int tb_xdomain_init(void);
void tb_xdomain_exit(void);

struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
int tb_domain_freeze_noirq(struct tb *tb);
int tb_domain_thaw_noirq(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring);
int tb_domain_disconnect_all_paths(struct tb *tb);

static inline struct tb *tb_domain_get(struct tb *tb)
{
	if (tb)
		get_device(&tb->dev);
	return tb;
}

static inline void tb_domain_put(struct tb *tb)
{
	put_device(&tb->dev);
}

struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_read_version(struct tb_nvm *nvm);
int tb_nvm_validate(struct tb_nvm *nvm);
int tb_nvm_write_headers(struct tb_nvm *nvm);
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
		     size_t bytes);
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);

typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t);

int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data);
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_next_block,
		      void *write_block_data);

int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size);
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
					    struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type);
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
					       u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);

/**
 * tb_switch_for_each_port() - Iterate over each switch port
 * @sw: Switch whose ports to iterate
 * @p: Port used as iterator
 *
 * Iterates over each switch port skipping the control port (port %0).
 */
#define tb_switch_for_each_port(sw, p)					\
	for ((p) = &(sw)->ports[1];					\
	     (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
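
/*
 * Usage sketch (editorial, not from the original file): walking every
 * adapter of a router and logging the lane adapters.
 *
 *	struct tb_port *port;
 *
 *	tb_switch_for_each_port(sw, port) {
 *		if (tb_port_is_null(port))
 *			tb_port_dbg(port, "lane adapter\n");
 *	}
 */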
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
	if (sw)
		get_device(&sw->dev);
	return sw;
}

static inline void tb_switch_put(struct tb_switch *sw)
{
	put_device(&sw->dev);
}

static inline bool tb_is_switch(const struct device *dev)
{
	return dev->type == &tb_switch_type;
}

static inline struct tb_switch *tb_to_switch(struct device *dev)
{
	if (tb_is_switch(dev))
		return container_of(dev, struct tb_switch, dev);
	return NULL;
}

static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
{
	return tb_to_switch(sw->dev.parent);
}

static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
	       sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}

static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
	return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
	       sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}

static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
		case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
			return true;
		}
	}
	return false;
}

static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
{
	if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_TGL_NHI0:
		case PCI_DEVICE_ID_INTEL_TGL_NHI1:
		case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
		case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
			return true;
		}
	}
	return false;
}
/**
 * tb_switch_is_usb4() - Is the switch USB4 compliant
 * @sw: Switch to check
 *
 * Returns true if @sw is a USB4 compliant router, false otherwise.
 */
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
	return sw->config.thunderbolt_version == USB4_VERSION_1_0;
}

/**
 * tb_switch_is_icm() - Is the switch handled by ICM firmware
 * @sw: Switch to check
 *
 * In case there is a need to differentiate whether ICM firmware or SW CM
 * is handling @sw this function can be called. It is valid to call this
 * after tb_switch_alloc() and tb_switch_configure() have been called
 * (the latter only in the SW CM case).
 */
static inline bool tb_switch_is_icm(const struct tb_switch *sw)
{
	return !sw->config.enabled;
}
int tb_switch_lane_bonding_enable(struct tb_switch *sw);
void tb_switch_lane_bonding_disable(struct tb_switch *sw);
int tb_switch_configure_link(struct tb_switch *sw);
void tb_switch_unconfigure_link(struct tb_switch *sw);

bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);

int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);
void tb_switch_tmu_configure(struct tb_switch *sw,
			     enum tb_switch_tmu_rate rate,
			     bool unidirectional);
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
				    enum tb_switch_tmu_rate rate);

/**
 * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
 * @sw: Router whose TMU mode to check
 * @unidirectional: If uni-directional (bi-directional otherwise)
 *
 * Returns true if the hardware TMU configuration matches the one passed
 * in as parameter: HiFi/Normal rate and either uni-directional or
 * bi-directional mode.
 */
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw,
					    bool unidirectional)
{
	return sw->tmu.rate == sw->tmu.rate_request &&
	       sw->tmu.unidirectional == unidirectional;
}
static inline const char *tb_switch_clx_name(enum tb_clx clx)
{
	switch (clx) {
	/* CL0s and CL1 are enabled and supported together */
	case TB_CL1:
		return "CL0s/CL1";
	default:
		return "unknown";
	}
}

int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);

/**
 * tb_switch_is_clx_enabled() - Checks if the CLx is enabled
 * @sw: Router to check for the CLx
 * @clx: The CLx state to check for
 *
 * Checks if the specified CLx is enabled on the router upstream link.
 * Not applicable for a host router.
 */
static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
					    enum tb_clx clx)
{
	return sw->clx == clx;
}

/**
 * tb_switch_is_clx_supported() - Is CLx supported on this type of router
 * @sw: The router to check CLx support for
 */
static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
{
	if (sw->quirks & QUIRK_NO_CLX)
		return false;

	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
int tb_switch_mask_clx_objections(struct tb_switch *sw);

int tb_switch_pcie_l1_enable(struct tb_switch *sw);

int tb_switch_xhci_connect(struct tb_switch *sw);
void tb_switch_xhci_disconnect(struct tb_switch *sw);

int tb_port_state(struct tb_port *port);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_unlock(struct tb_port *port);
int tb_port_enable(struct tb_port *port);
int tb_port_disable(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev);

static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
{
	return tb_port_is_null(port) && port->sw->credit_allocation;
}

/**
 * tb_for_each_port_on_path() - Iterate over each port on path
 * @src: Source port
 * @dst: Destination port
 * @p: Port used as iterator
 *
 * Walks over each port on path from @src to @dst.
 */
#define tb_for_each_port_on_path(src, dst, p)				\
	for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);	\
	     (p) = tb_next_port_on_path((src), (dst), (p)))
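
/*
 * Usage sketch (editorial, not from the original file): counting the
 * ports a tunnel would traverse between two adapters.
 *
 *	struct tb_port *p;
 *	int nports = 0;
 *
 *	tb_for_each_port_on_path(src, dst, p)
 *		nports++;
 */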
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
int tb_port_set_link_width(struct tb_port *port, unsigned int width);
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
				int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);

int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
int tb_port_next_cap(struct tb_port *port, unsigned int offset);
bool tb_port_is_enabled(struct tb_port *port);

bool tb_usb3_port_is_enabled(struct tb_port *port);
int tb_usb3_port_enable(struct tb_port *port, bool enable);

bool tb_pci_port_is_enabled(struct tb_port *port);
int tb_pci_port_enable(struct tb_port *port, bool enable);

int tb_dp_port_hpd_is_active(struct tb_port *port);
int tb_dp_port_hpd_clear(struct tb_port *port);
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx);
bool tb_dp_port_is_enabled(struct tb_port *port);
int tb_dp_port_enable(struct tb_port *port, bool enable);

struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
				 struct tb_port *dst, int dst_hopid,
				 struct tb_port **last, const char *name,
				 bool alloc_hopid);
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
			      struct tb_port *dst, int dst_hopid, int link_nr,
			      const char *name);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
			  const struct tb_port *port);

/**
 * tb_path_for_each_hop() - Iterate over each hop on path
 * @path: Path whose hops to iterate
 * @hop: Hop used as iterator
 *
 * Iterates over each hop on path.
 */
#define tb_path_for_each_hop(path, hop)					\
	for ((hop) = &(path)->hops[0];					\
	     (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++)
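
/*
 * Usage sketch (editorial, not from the original file): dumping the
 * HopIDs of each hop on an allocated path.
 *
 *	struct tb_path_hop *hop;
 *
 *	tb_path_for_each_hop(path, hop)
 *		tb_port_dbg(hop->in_port, "HopID %d -> %d\n",
 *			    hop->in_hop_index, hop->next_hop_index);
 */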
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);

int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
void tb_lc_unconfigure_xdomain(struct tb_port *port);
int tb_lc_start_lane_initialization(struct tb_port *port);
bool tb_lc_is_clx_supported(struct tb_port *port);
bool tb_lc_is_usb_plugged(struct tb_port *port);
bool tb_lc_is_xhci_connected(struct tb_port *port);
int tb_lc_xhci_connect(struct tb_port *port);
void tb_lc_xhci_disconnect(struct tb_port *port);
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
int tb_lc_force_power(struct tb_switch *sw);

static inline int tb_route_length(u64 route)
{
	return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}

/**
 * tb_downstream_route() - get route to downstream switch
 *
 * Port must not be the upstream port (otherwise a loop is created).
 *
 * Return: Returns a route to the switch behind @port.
 */
static inline u64 tb_downstream_route(struct tb_port *port)
{
	return tb_route(port->sw)
	       | ((u64) port->port << (port->sw->config.depth * 8));
}
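
/*
 * Worked example (editorial): the host router has route 0. A router
 * behind host downstream port 3 gets route 0x3, and a router behind
 * that router's port 5 gets 0x3 | (5 << 8) = 0x503, matching the one
 * byte per depth encoding decoded by tb_port_at() above.
 */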
bool tb_is_xdomain_enabled(void);
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size);
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid);
void tb_xdomain_add(struct tb_xdomain *xd);
void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth);

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}

int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
void tb_retimer_remove_all(struct tb_port *port);

static inline bool tb_is_retimer(const struct device *dev)
{
	return dev->type == &tb_retimer_type;
}

static inline struct tb_retimer *tb_to_retimer(struct device *dev)
{
	if (tb_is_retimer(dev))
		return container_of(dev, struct tb_retimer, dev);
	return NULL;
}

int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size);
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address);
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size);
int usb4_switch_nvm_authenticate(struct tb_switch *sw);
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status);
int usb4_switch_credits_init(struct tb_switch *sw);
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port);
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port);
int usb4_switch_add_ports(struct tb_switch *sw);
void usb4_switch_remove_ports(struct tb_switch *sw);

int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results);
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter);
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);

int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address);
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
				unsigned int address, const void *buf,
				size_t size);
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status);
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size);

int usb4_usb3_port_max_link_rate(struct tb_port *port);
int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw);
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw);

static inline bool tb_is_usb4_port_device(const struct device *dev)
{
	return dev->type == &usb4_port_device_type;
}

static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
{
	if (tb_is_usb4_port_device(dev))
		return container_of(dev, struct usb4_port, dev);
	return NULL;
}

struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);

void tb_check_quirks(struct tb_switch *sw);

#ifdef CONFIG_ACPI
void tb_acpi_add_links(struct tb_nhi *nhi);

bool tb_acpi_is_native(void);
bool tb_acpi_may_tunnel_usb3(void);
bool tb_acpi_may_tunnel_dp(void);
bool tb_acpi_may_tunnel_pcie(void);
bool tb_acpi_is_xdomain_allowed(void);

int tb_acpi_init(void);
void tb_acpi_exit(void);
int tb_acpi_power_on_retimers(struct tb_port *port);
int tb_acpi_power_off_retimers(struct tb_port *port);
#else
static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }

static inline bool tb_acpi_is_native(void) { return true; }
static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }

static inline int tb_acpi_init(void) { return 0; }
static inline void tb_acpi_exit(void) { }
static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; }
static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS
void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
#else
static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif

#endif