#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__

/*
 * Legacy driver interfaces for the Direct Rendering Manager
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <[email protected]>
 *
 * Author: Rickard E. (Rik) Faith <[email protected]>
 * Author: Gareth Hughes <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/agp_backend.h>

#include <drm/drm.h>
#include <drm/drm_auth.h>

struct drm_device;
struct drm_driver;
struct file;
struct pci_driver;

/*
 * Legacy Support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */

/*
 * Hash-table Support
 */

struct drm_hash_item {
	struct hlist_node head;
	unsigned long key;
};

struct drm_open_hash {
	struct hlist_head *table;
	u8 order;
};

/**
 * DMA buffer.
 */
struct drm_buf {
	int idx;			/**< Index into master buflist */
	int total;			/**< Buffer size */
	int order;			/**< log-base-2(total) */
	int used;			/**< Amount of buffer in use (for DMA) */
	unsigned long offset;		/**< Byte offset (used internally) */
	void *address;			/**< Address of buffer */
	unsigned long bus_address;	/**< Bus address of buffer */
	struct drm_buf *next;		/**< Kernel-only: used for free list */
	__volatile__ int waiting;	/**< On kernel DMA queue */
	__volatile__ int pending;	/**< On hardware DMA queue */
	struct drm_file *file_priv;	/**< Private of holding file descr */
	int context;			/**< Kernel queue for this buffer */
	int while_locked;		/**< Dispatch this buffer while locked */
	enum {
		DRM_LIST_NONE	 = 0,
		DRM_LIST_FREE	 = 1,
		DRM_LIST_WAIT	 = 2,
		DRM_LIST_PEND	 = 3,
		DRM_LIST_PRIO	 = 4,
		DRM_LIST_RECLAIM = 5
	} list;				/**< Which list we're on */

	int dev_priv_size;		/**< Size of buffer private storage */
	void *dev_private;		/**< Per-buffer private storage */
};

typedef struct drm_dma_handle {
	dma_addr_t busaddr;
	void *vaddr;
	size_t size;
} drm_dma_handle_t;

/**
 * Buffer entry. There is one of these for each buffer size order.
 */
struct drm_buf_entry {
	int buf_size;			/**< size */
	int buf_count;			/**< number of buffers */
	struct drm_buf *buflist;	/**< buffer list */
	int seg_count;
	int page_order;
	struct drm_dma_handle **seglist;
	int low_mark;			/**< Low water mark */
	int high_mark;			/**< High water mark */
};

/**
 * DMA data.
 */
struct drm_device_dma {
	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
	int buf_count;			/**< total number of buffers */
	struct drm_buf **buflist;	/**< Vector of pointers into drm_device_dma::bufs */
	int seg_count;
	int page_count;			/**< number of pages */
	unsigned long *pagelist;	/**< page list */
	unsigned long byte_count;
	enum {
		_DRM_DMA_USE_AGP    = 0x01,
		_DRM_DMA_USE_SG     = 0x02,
		_DRM_DMA_USE_FB     = 0x04,
		_DRM_DMA_USE_PCI_RO = 0x08
	} flags;
};

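/*
 * Illustrative sketch (not part of the original header): how a legacy driver
 * might walk the per-device DMA buffer list when reclaiming buffers owned by
 * a closing file. It assumes the DRM core has stored the drm_device_dma in
 * dev->dma, as legacy drivers expect; foo_reclaim_buffers() is a hypothetical
 * name and the hardware-specific cleanup is omitted.
 *
 *	static void foo_reclaim_buffers(struct drm_device *dev,
 *					struct drm_file *file_priv)
 *	{
 *		struct drm_device_dma *dma = dev->dma;
 *		int i;
 *
 *		if (!dma)
 *			return;
 *		for (i = 0; i < dma->buf_count; i++) {
 *			struct drm_buf *buf = dma->buflist[i];
 *
 *			if (buf->file_priv == file_priv && !buf->pending)
 *				buf->used = 0;	// mark the buffer idle again
 *		}
 *	}
 */
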
/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
	unsigned long handle;
	void *virtual;
	int pages;
	struct page **pagelist;
	dma_addr_t *busaddr;
};

/**
 * Kernel side of a mapping
 */
struct drm_local_map {
	dma_addr_t offset;		/**< Requested physical address (0 for SAREA) */
	unsigned long size;		/**< Requested physical size (bytes) */
	enum drm_map_type type;		/**< Type of memory to map */
	enum drm_map_flags flags;	/**< Flags */
	void *handle;			/**< User-space: "Handle" to pass to mmap() */
					/**< Kernel-space: kernel-virtual address */
	int mtrr;			/**< MTRR slot used */
};

typedef struct drm_local_map drm_local_map_t;

/**
 * Mappings list
 */
struct drm_map_list {
	struct list_head head;		/**< list head */
	struct drm_hash_item hash;
	struct drm_local_map *map;	/**< mapping */
	uint64_t user_token;
	struct drm_master *master;
};

int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_p);
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);

int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);

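/*
 * Illustrative sketch (not part of the original header): mapping a register
 * BAR from a legacy driver's load path with drm_legacy_addmap(). The foo_*
 * name, the BAR index and the use of pci_resource_start()/pci_resource_len()
 * (from <linux/pci.h>) are assumptions made for the example.
 *
 *	static int foo_map_mmio(struct drm_device *dev, struct pci_dev *pdev)
 *	{
 *		struct drm_local_map *map;
 *		int ret;
 *
 *		ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *					pci_resource_len(pdev, 0),
 *					_DRM_REGISTERS, _DRM_KERNEL, &map);
 *		if (ret)
 *			return ret;
 *		// map->handle now holds the kernel-virtual address of the BAR
 *		return 0;
 *	}
 */
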
/**
 * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
 *
 * \param dev DRM device.
 * \param _file_priv DRM file private of the caller.
 */
#define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
do {										\
	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
	    _file_priv->master->lock.file_priv != _file_priv) {		\
		DRM_ERROR("%s called without lock held, held %d owner %p %p\n",\
			  __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
			  _file_priv->master->lock.file_priv, _file_priv);	\
		return -EINVAL;							\
	}									\
} while (0)

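/*
 * Illustrative sketch (not part of the original header): a legacy ioctl
 * handler bailing out early when the caller does not hold the hardware lock.
 * foo_dma_ioctl() is a hypothetical name; the handler signature is the usual
 * one for legacy DRM ioctls.
 *
 *	static int foo_dma_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		LOCK_TEST_WITH_RETURN(dev, file_priv);
 *
 *		// safe to touch ring/DMA state here: the HW lock is held
 *		return 0;
 *	}
 */
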
void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);

/* drm_irq.c */
int drm_legacy_irq_uninstall(struct drm_device *dev);

/* drm_pci.c */
#ifdef CONFIG_PCI

int drm_legacy_pci_init(const struct drm_driver *driver,
			struct pci_driver *pdriver);
void drm_legacy_pci_exit(const struct drm_driver *driver,
			 struct pci_driver *pdriver);

#else

static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
						   size_t size, size_t align)
{
	return NULL;
}

static inline void drm_pci_free(struct drm_device *dev,
				struct drm_dma_handle *dmah)
{
}

static inline int drm_legacy_pci_init(const struct drm_driver *driver,
				      struct pci_driver *pdriver)
{
	return -EINVAL;
}

static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
				       struct pci_driver *pdriver)
{
}

#endif

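/*
 * Illustrative sketch (not part of the original header): how a legacy PCI
 * driver typically registers and unregisters itself at module load/unload.
 * foo_drm_driver and foo_pci_driver are hypothetical objects the driver
 * would define elsewhere.
 *
 *	static int __init foo_init(void)
 *	{
 *		return drm_legacy_pci_init(&foo_drm_driver, &foo_pci_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		drm_legacy_pci_exit(&foo_drm_driver, &foo_pci_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */
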
/*
 * AGP Support
 */

struct drm_agp_head {
	struct agp_kern_info agp_info;
	struct list_head memory;
	unsigned long mode;
	struct agp_bridge_data *bridge;
	int enabled;
	int acquired;
	unsigned long base;
	int agp_mtrr;
	int cant_use_aperture;
	unsigned long page_mask;
};

#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
int drm_legacy_agp_acquire(struct drm_device *dev);
int drm_legacy_agp_release(struct drm_device *dev);
int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
#else
static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
	return NULL;
}

static inline int drm_legacy_agp_acquire(struct drm_device *dev)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_release(struct drm_device *dev)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_enable(struct drm_device *dev,
					struct drm_agp_mode mode)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_info(struct drm_device *dev,
				      struct drm_agp_info *info)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_alloc(struct drm_device *dev,
				       struct drm_agp_buffer *request)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_free(struct drm_device *dev,
				      struct drm_agp_buffer *request)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_unbind(struct drm_device *dev,
					struct drm_agp_binding *request)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_bind(struct drm_device *dev,
				      struct drm_agp_binding *request)
{
	return -ENODEV;
}
#endif

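/*
 * Illustrative sketch (not part of the original header): an acquire/enable
 * sequence a legacy driver might run at load time before using AGP memory.
 * foo_agp_setup() is a hypothetical name and the mode value is only a
 * placeholder; real mode bits are device-specific.
 *
 *	static int foo_agp_setup(struct drm_device *dev)
 *	{
 *		struct drm_agp_mode mode = { .mode = 0 };	// placeholder mode
 *		int ret;
 *
 *		ret = drm_legacy_agp_acquire(dev);
 *		if (ret)
 *			return ret;
 *		ret = drm_legacy_agp_enable(dev, mode);
 *		if (ret)
 *			drm_legacy_agp_release(dev);
 *		return ret;
 *	}
 */
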
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);

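/*
 * Illustrative sketch (not part of the original header): ioremapping a
 * register map and reading from it. On success drm_legacy_ioremap() stores
 * the kernel-virtual address in map->handle; foo_read_status() and the 0x04
 * register offset are hypothetical.
 *
 *	static u32 foo_read_status(struct drm_device *dev,
 *				   struct drm_local_map *map)
 *	{
 *		u32 val;
 *
 *		drm_legacy_ioremap(map, dev);
 *		if (!map->handle)
 *			return 0;
 *		val = readl((void __iomem *)map->handle + 0x04);
 *		drm_legacy_ioremapfree(map, dev);
 *		return val;
 *	}
 */
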
#endif /* __DRM_DRM_LEGACY_H__ */