/* firewire.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_FIREWIRE_H
  3. #define _LINUX_FIREWIRE_H
  4. #include <linux/completion.h>
  5. #include <linux/device.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/kernel.h>
  8. #include <linux/kref.h>
  9. #include <linux/list.h>
  10. #include <linux/mutex.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/sysfs.h>
  13. #include <linux/timer.h>
  14. #include <linux/types.h>
  15. #include <linux/workqueue.h>
  16. #include <linux/atomic.h>
  17. #include <asm/byteorder.h>
  18. #define CSR_REGISTER_BASE 0xfffff0000000ULL
  19. /* register offsets are relative to CSR_REGISTER_BASE */
  20. #define CSR_STATE_CLEAR 0x0
  21. #define CSR_STATE_SET 0x4
  22. #define CSR_NODE_IDS 0x8
  23. #define CSR_RESET_START 0xc
  24. #define CSR_SPLIT_TIMEOUT_HI 0x18
  25. #define CSR_SPLIT_TIMEOUT_LO 0x1c
  26. #define CSR_CYCLE_TIME 0x200
  27. #define CSR_BUS_TIME 0x204
  28. #define CSR_BUSY_TIMEOUT 0x210
  29. #define CSR_PRIORITY_BUDGET 0x218
  30. #define CSR_BUS_MANAGER_ID 0x21c
  31. #define CSR_BANDWIDTH_AVAILABLE 0x220
  32. #define CSR_CHANNELS_AVAILABLE 0x224
  33. #define CSR_CHANNELS_AVAILABLE_HI 0x224
  34. #define CSR_CHANNELS_AVAILABLE_LO 0x228
  35. #define CSR_MAINT_UTILITY 0x230
  36. #define CSR_BROADCAST_CHANNEL 0x234
  37. #define CSR_CONFIG_ROM 0x400
  38. #define CSR_CONFIG_ROM_END 0x800
  39. #define CSR_OMPR 0x900
  40. #define CSR_OPCR(i) (0x904 + (i) * 4)
  41. #define CSR_IMPR 0x980
  42. #define CSR_IPCR(i) (0x984 + (i) * 4)
  43. #define CSR_FCP_COMMAND 0xB00
  44. #define CSR_FCP_RESPONSE 0xD00
  45. #define CSR_FCP_END 0xF00
  46. #define CSR_TOPOLOGY_MAP 0x1000
  47. #define CSR_TOPOLOGY_MAP_END 0x1400
  48. #define CSR_SPEED_MAP 0x2000
  49. #define CSR_SPEED_MAP_END 0x3000
  50. #define CSR_OFFSET 0x40
  51. #define CSR_LEAF 0x80
  52. #define CSR_DIRECTORY 0xc0
  53. #define CSR_DESCRIPTOR 0x01
  54. #define CSR_VENDOR 0x03
  55. #define CSR_HARDWARE_VERSION 0x04
  56. #define CSR_UNIT 0x11
  57. #define CSR_SPECIFIER_ID 0x12
  58. #define CSR_VERSION 0x13
  59. #define CSR_DEPENDENT_INFO 0x14
  60. #define CSR_MODEL 0x17
  61. #define CSR_DIRECTORY_ID 0x20
  62. struct fw_csr_iterator {
  63. const u32 *p;
  64. const u32 *end;
  65. };
  66. void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
  67. int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
  68. int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
  69. extern struct bus_type fw_bus_type;
  70. struct fw_card_driver;
  71. struct fw_node;
  72. struct fw_card {
  73. const struct fw_card_driver *driver;
  74. struct device *device;
  75. struct kref kref;
  76. struct completion done;
  77. int node_id;
  78. int generation;
  79. int current_tlabel;
  80. u64 tlabel_mask;
  81. struct list_head transaction_list;
  82. u64 reset_jiffies;
  83. u32 split_timeout_hi;
  84. u32 split_timeout_lo;
  85. unsigned int split_timeout_cycles;
  86. unsigned int split_timeout_jiffies;
  87. unsigned long long guid;
  88. unsigned max_receive;
  89. int link_speed;
  90. int config_rom_generation;
  91. spinlock_t lock; /* Take this lock when handling the lists in
  92. * this struct. */
  93. struct fw_node *local_node;
  94. struct fw_node *root_node;
  95. struct fw_node *irm_node;
  96. u8 color; /* must be u8 to match the definition in struct fw_node */
  97. int gap_count;
  98. bool beta_repeaters_present;
  99. int index;
  100. struct list_head link;
  101. struct list_head phy_receiver_list;
  102. struct delayed_work br_work; /* bus reset job */
  103. bool br_short;
  104. struct delayed_work bm_work; /* bus manager job */
  105. int bm_retries;
  106. int bm_generation;
  107. int bm_node_id;
  108. bool bm_abdicate;
  109. bool priority_budget_implemented; /* controller feature */
  110. bool broadcast_channel_auto_allocated; /* controller feature */
  111. bool broadcast_channel_allocated;
  112. u32 broadcast_channel;
  113. __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
  114. __be32 maint_utility_register;
  115. };
  116. static inline struct fw_card *fw_card_get(struct fw_card *card)
  117. {
  118. kref_get(&card->kref);
  119. return card;
  120. }
  121. void fw_card_release(struct kref *kref);
  122. static inline void fw_card_put(struct fw_card *card)
  123. {
  124. kref_put(&card->kref, fw_card_release);
  125. }
  126. int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time);
  127. struct fw_attribute_group {
  128. struct attribute_group *groups[2];
  129. struct attribute_group group;
  130. struct attribute *attrs[13];
  131. };
  132. enum fw_device_state {
  133. FW_DEVICE_INITIALIZING,
  134. FW_DEVICE_RUNNING,
  135. FW_DEVICE_GONE,
  136. FW_DEVICE_SHUTDOWN,
  137. };
  138. /*
  139. * Note, fw_device.generation always has to be read before fw_device.node_id.
  140. * Use SMP memory barriers to ensure this. Otherwise requests will be sent
  141. * to an outdated node_id if the generation was updated in the meantime due
  142. * to a bus reset.
  143. *
  144. * Likewise, fw-core will take care to update .node_id before .generation so
  145. * that whenever fw_device.generation is current WRT the actual bus generation,
  146. * fw_device.node_id is guaranteed to be current too.
  147. *
  148. * The same applies to fw_device.card->node_id vs. fw_device.generation.
  149. *
  150. * fw_device.config_rom and fw_device.config_rom_length may be accessed during
  151. * the lifetime of any fw_unit belonging to the fw_device, before device_del()
  152. * was called on the last fw_unit. Alternatively, they may be accessed while
  153. * holding fw_device_rwsem.
  154. */
  155. struct fw_device {
  156. atomic_t state;
  157. struct fw_node *node;
  158. int node_id;
  159. int generation;
  160. unsigned max_speed;
  161. struct fw_card *card;
  162. struct device device;
  163. struct mutex client_list_mutex;
  164. struct list_head client_list;
  165. const u32 *config_rom;
  166. size_t config_rom_length;
  167. int config_rom_retries;
  168. unsigned is_local:1;
  169. unsigned max_rec:4;
  170. unsigned cmc:1;
  171. unsigned irmc:1;
  172. unsigned bc_implemented:2;
  173. work_func_t workfn;
  174. struct delayed_work work;
  175. struct fw_attribute_group attribute_group;
  176. };
  177. static inline struct fw_device *fw_device(struct device *dev)
  178. {
  179. return container_of(dev, struct fw_device, device);
  180. }
  181. static inline int fw_device_is_shutdown(struct fw_device *device)
  182. {
  183. return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
  184. }
  185. int fw_device_enable_phys_dma(struct fw_device *device);
  186. /*
  187. * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
  188. */
  189. struct fw_unit {
  190. struct device device;
  191. const u32 *directory;
  192. struct fw_attribute_group attribute_group;
  193. };
  194. static inline struct fw_unit *fw_unit(struct device *dev)
  195. {
  196. return container_of(dev, struct fw_unit, device);
  197. }
  198. static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
  199. {
  200. get_device(&unit->device);
  201. return unit;
  202. }
  203. static inline void fw_unit_put(struct fw_unit *unit)
  204. {
  205. put_device(&unit->device);
  206. }
  207. static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
  208. {
  209. return fw_device(unit->device.parent);
  210. }
  211. struct ieee1394_device_id;
  212. struct fw_driver {
  213. struct device_driver driver;
  214. int (*probe)(struct fw_unit *unit, const struct ieee1394_device_id *id);
  215. /* Called when the parent device sits through a bus reset. */
  216. void (*update)(struct fw_unit *unit);
  217. void (*remove)(struct fw_unit *unit);
  218. const struct ieee1394_device_id *id_table;
  219. };
  220. struct fw_packet;
  221. struct fw_request;
  222. typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
  223. struct fw_card *card, int status);
  224. typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
  225. void *data, size_t length,
  226. void *callback_data);
  227. /*
  228. * This callback handles an inbound request subaction. It is called in
  229. * RCU read-side context, therefore must not sleep.
  230. *
  231. * The callback should not initiate outbound request subactions directly.
  232. * Otherwise there is a danger of recursion of inbound and outbound
  233. * transactions from and to the local node.
  234. *
  235. * The callback is responsible that either fw_send_response() or kfree()
  236. * is called on the @request, except for FCP registers for which the core
  237. * takes care of that.
  238. */
  239. typedef void (*fw_address_callback_t)(struct fw_card *card,
  240. struct fw_request *request,
  241. int tcode, int destination, int source,
  242. int generation,
  243. unsigned long long offset,
  244. void *data, size_t length,
  245. void *callback_data);
  246. struct fw_packet {
  247. int speed;
  248. int generation;
  249. u32 header[4];
  250. size_t header_length;
  251. void *payload;
  252. size_t payload_length;
  253. dma_addr_t payload_bus;
  254. bool payload_mapped;
  255. u32 timestamp;
  256. /*
  257. * This callback is called when the packet transmission has completed.
  258. * For successful transmission, the status code is the ack received
  259. * from the destination. Otherwise it is one of the juju-specific
  260. * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
  261. * The callback can be called from tasklet context and thus
  262. * must never block.
  263. */
  264. fw_packet_callback_t callback;
  265. int ack;
  266. struct list_head link;
  267. void *driver_data;
  268. };
  269. struct fw_transaction {
  270. int node_id; /* The generation is implied; it is always the current. */
  271. int tlabel;
  272. struct list_head link;
  273. struct fw_card *card;
  274. bool is_split_transaction;
  275. struct timer_list split_timeout_timer;
  276. struct fw_packet packet;
  277. /*
  278. * The data passed to the callback is valid only during the
  279. * callback.
  280. */
  281. fw_transaction_callback_t callback;
  282. void *callback_data;
  283. };
  284. struct fw_address_handler {
  285. u64 offset;
  286. u64 length;
  287. fw_address_callback_t address_callback;
  288. void *callback_data;
  289. struct list_head link;
  290. };
  291. struct fw_address_region {
  292. u64 start;
  293. u64 end;
  294. };
  295. extern const struct fw_address_region fw_high_memory_region;
  296. int fw_core_add_address_handler(struct fw_address_handler *handler,
  297. const struct fw_address_region *region);
  298. void fw_core_remove_address_handler(struct fw_address_handler *handler);
  299. void fw_send_response(struct fw_card *card,
  300. struct fw_request *request, int rcode);
  301. int fw_get_request_speed(struct fw_request *request);
  302. u32 fw_request_get_timestamp(const struct fw_request *request);
  303. void fw_send_request(struct fw_card *card, struct fw_transaction *t,
  304. int tcode, int destination_id, int generation, int speed,
  305. unsigned long long offset, void *payload, size_t length,
  306. fw_transaction_callback_t callback, void *callback_data);
  307. int fw_cancel_transaction(struct fw_card *card,
  308. struct fw_transaction *transaction);
  309. int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
  310. int generation, int speed, unsigned long long offset,
  311. void *payload, size_t length);
  312. const char *fw_rcode_string(int rcode);
  313. static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
  314. {
  315. return tag << 14 | channel << 8 | sy;
  316. }
  317. void fw_schedule_bus_reset(struct fw_card *card, bool delayed,
  318. bool short_reset);
  319. struct fw_descriptor {
  320. struct list_head link;
  321. size_t length;
  322. u32 immediate;
  323. u32 key;
  324. const u32 *data;
  325. };
  326. int fw_core_add_descriptor(struct fw_descriptor *desc);
  327. void fw_core_remove_descriptor(struct fw_descriptor *desc);
  328. /*
  329. * The iso packet format allows for an immediate header/payload part
  330. * stored in 'header' immediately after the packet info plus an
  331. * indirect payload part that is pointer to by the 'payload' field.
  332. * Applications can use one or the other or both to implement simple
  333. * low-bandwidth streaming (e.g. audio) or more advanced
  334. * scatter-gather streaming (e.g. assembling video frame automatically).
  335. */
  336. struct fw_iso_packet {
  337. u16 payload_length; /* Length of indirect payload */
  338. u32 interrupt:1; /* Generate interrupt on this packet */
  339. u32 skip:1; /* tx: Set to not send packet at all */
  340. /* rx: Sync bit, wait for matching sy */
  341. u32 tag:2; /* tx: Tag in packet header */
  342. u32 sy:4; /* tx: Sy in packet header */
  343. u32 header_length:8; /* Length of immediate header */
  344. u32 header[0]; /* tx: Top of 1394 isoch. data_block */
  345. };
  346. #define FW_ISO_CONTEXT_TRANSMIT 0
  347. #define FW_ISO_CONTEXT_RECEIVE 1
  348. #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2
  349. #define FW_ISO_CONTEXT_MATCH_TAG0 1
  350. #define FW_ISO_CONTEXT_MATCH_TAG1 2
  351. #define FW_ISO_CONTEXT_MATCH_TAG2 4
  352. #define FW_ISO_CONTEXT_MATCH_TAG3 8
  353. #define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
  354. /*
  355. * An iso buffer is just a set of pages mapped for DMA in the
  356. * specified direction. Since the pages are to be used for DMA, they
  357. * are not mapped into the kernel virtual address space. We store the
  358. * DMA address in the page private. The helper function
  359. * fw_iso_buffer_map() will map the pages into a given vma.
  360. */
  361. struct fw_iso_buffer {
  362. enum dma_data_direction direction;
  363. struct page **pages;
  364. int page_count;
  365. int page_count_mapped;
  366. };
  367. int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
  368. int page_count, enum dma_data_direction direction);
  369. void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
  370. size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
  371. struct fw_iso_context;
  372. typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
  373. u32 cycle, size_t header_length,
  374. void *header, void *data);
  375. typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
  376. dma_addr_t completed, void *data);
  377. union fw_iso_callback {
  378. fw_iso_callback_t sc;
  379. fw_iso_mc_callback_t mc;
  380. };
  381. struct fw_iso_context {
  382. struct fw_card *card;
  383. int type;
  384. int channel;
  385. int speed;
  386. bool drop_overflow_headers;
  387. size_t header_size;
  388. union fw_iso_callback callback;
  389. void *callback_data;
  390. };
  391. struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
  392. int type, int channel, int speed, size_t header_size,
  393. fw_iso_callback_t callback, void *callback_data);
  394. int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
  395. int fw_iso_context_queue(struct fw_iso_context *ctx,
  396. struct fw_iso_packet *packet,
  397. struct fw_iso_buffer *buffer,
  398. unsigned long payload);
  399. void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
  400. int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
  401. int fw_iso_context_start(struct fw_iso_context *ctx,
  402. int cycle, int sync, int tags);
  403. int fw_iso_context_stop(struct fw_iso_context *ctx);
  404. void fw_iso_context_destroy(struct fw_iso_context *ctx);
  405. void fw_iso_resource_manage(struct fw_card *card, int generation,
  406. u64 channels_mask, int *channel, int *bandwidth,
  407. bool allocate);
  408. extern struct workqueue_struct *fw_workqueue;
  409. #endif /* _LINUX_FIREWIRE_H */