  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /*
  3. * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
  4. */
  5. #ifndef __LINUX_HOST1X_H
  6. #define __LINUX_HOST1X_H
  7. #include <linux/device.h>
  8. #include <linux/dma-direction.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/types.h>
  11. enum host1x_class {
  12. HOST1X_CLASS_HOST1X = 0x1,
  13. HOST1X_CLASS_GR2D = 0x51,
  14. HOST1X_CLASS_GR2D_SB = 0x52,
  15. HOST1X_CLASS_VIC = 0x5D,
  16. HOST1X_CLASS_GR3D = 0x60,
  17. HOST1X_CLASS_NVDEC = 0xF0,
  18. HOST1X_CLASS_NVDEC1 = 0xF5,
  19. };
struct host1x;
struct host1x_client;
struct iommu_group;

/* Query the DMA mask supported by the given host1x instance. */
u64 host1x_get_dma_mask(struct host1x *host1x);
/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};
  38. static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
  39. {
  40. INIT_LIST_HEAD(&cache->mappings);
  41. mutex_init(&cache->lock);
  42. }
/*
 * Tear down a buffer object cache. Note that entries still on
 * @cache->mappings are not released here; callers are expected to have
 * drained the cache beforehand.
 */
static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}
/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 *
 * All callbacks return 0 on success or a negative error code on failure.
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
/*
 * host1x buffer objects
 */
struct host1x_bo;
struct sg_table;

/**
 * struct host1x_bo_mapping - a device mapping of a host1x buffer object
 * @ref: reference count
 * @attach: DMA-BUF attachment (NOTE(review): presumably only set for
 *          imported buffers -- confirm against host1x_bo_pin())
 * @direction: DMA data direction of the mapping
 * @list: entry on the buffer object's list of mappings
 * @bo: buffer object that is mapped
 * @sgt: scatter-gather table describing the mapping
 * @chunks: number of chunks in @sgt (TODO confirm exact semantics)
 * @dev: device that the buffer is mapped for
 * @phys: DMA address of the mapping
 * @size: size of the mapping in bytes
 * @cache: cache that this mapping belongs to, if any
 * @entry: entry on the cache's list of mappings
 */
struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};
/* Convert an embedded kref back to its containing host1x_bo_mapping. */
static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}
/**
 * struct host1x_bo_ops - buffer object operations implemented by the owner
 * @get: take a reference, returning the buffer object
 * @put: drop a reference
 * @pin: map the buffer for DMA by @dev in direction @dir
 * @unpin: undo @pin
 * @mmap: map the buffer into kernel address space
 * @munmap: undo @mmap; @addr is the pointer @mmap returned
 */
struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

/**
 * struct host1x_bo - host1x buffer object
 * @ops: operations implemented by the buffer's owner
 * @mappings: list of per-device mappings of this buffer
 * @lock: synchronizes accesses to the list of mappings
 */
struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};
  133. static inline void host1x_bo_init(struct host1x_bo *bo,
  134. const struct host1x_bo_ops *ops)
  135. {
  136. INIT_LIST_HEAD(&bo->mappings);
  137. spin_lock_init(&bo->lock);
  138. bo->ops = ops;
  139. }
/* Take a reference on @bo via its get() operation. */
static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

/* Drop a reference on @bo via its put() operation. */
static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

/*
 * Pin @bo for DMA by @dev in direction @dir. If @cache is non-NULL, the
 * mapping may be kept in that cache (see struct host1x_bo_cache).
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

/* Map @bo into kernel address space via its mmap() operation. */
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

/* Undo host1x_bo_mmap(); @addr is the pointer it returned. */
static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
/*
 * host1x syncpoints
 */
/* NOTE(review): names suggest "incremented by the client engine" and "has a
 * wait base register" respectively -- confirm in the host1x core driver. */
#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
#define HOST1X_SYNCPT_HAS_BASE (1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

/* Lookup by ID; the _noref variant presumably takes no reference -- confirm. */
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);

/*
 * host1x channel
 */
struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
/*
 * host1x job
 */
#define HOST1X_RELOC_READ (1 << 0)
#define HOST1X_RELOC_WRITE (1 << 1)

/**
 * struct host1x_reloc - relocation of a buffer address inside a command buffer
 * @cmdbuf.bo: command buffer containing the word to patch
 * @cmdbuf.offset: offset within @cmdbuf.bo of the word to patch
 * @target.bo: buffer whose address is patched in
 * @target.offset: offset added to the target buffer's address
 * @shift: shift applied when patching (TODO confirm exact semantics)
 * @flags: HOST1X_RELOC_READ and/or HOST1X_RELOC_WRITE
 */
struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};
/*
 * struct host1x_job - one submission to a host1x channel, tracked from
 * allocation through pinning, submission and completion.
 */
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};
/* Allocate a job for @ch sized for @num_cmdbufs gathers and @num_relocs relocations. */
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
/*
 * subdevice probe infrastructure
 */
struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

/* Convert a core device_driver back to its containing host1x_driver. */
static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

/* Register @driver on behalf of the calling module. */
#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
/**
 * struct host1x_device - host1x logical device
 * @driver: driver bound to this device
 * @list: list node for the device
 * @dev: core device structure
 * @subdevs_lock: protects @subdevs and @active (presumably -- confirm)
 * @subdevs: subdevices not yet probed
 * @active: subdevices that have been probed (TODO confirm)
 * @clients_lock: protects @clients (presumably -- confirm)
 * @clients: host1x clients registered against this device
 * @registered: whether the device has been registered with the core
 * @dma_parms: DMA parameters for this device
 */
struct host1x_device {
	struct host1x_driver *driver;

	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

/* Convert a core device back to its containing host1x_device. */
static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

/* Initialize @client with a unique lockdep class key per call site. */
#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

int host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);
/*
 * Tegra MIPI pad calibration support
 */
struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
/* host1x memory contexts */

/**
 * struct host1x_memory_context - memory context for engine isolation
 * @host: host1x instance the context belongs to
 * @ref: reference count
 * @owner: owning process (NOTE(review): presumably the allocating process's
 *         pid -- confirm against host1x_memory_context_alloc())
 * @dev: context device
 * @dma_mask: DMA mask of the context device
 * @stream_id: stream ID associated with this context
 */
struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};
#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							  struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
/* Without the IOMMU API, memory contexts are unavailable: alloc returns NULL
 * and the refcounting helpers are no-ops. */
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif
  397. #endif