kgsl.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __KGSL_H
#define __KGSL_H

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <uapi/linux/msm_kgsl.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include "kgsl_gmu_core.h"
#include "kgsl_pwrscale.h"

#define KGSL_L3_DEVICE "kgsl-l3"

#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
#include <soc/qcom/boot_stats.h>
#define KGSL_BOOT_MARKER(str) place_marker("M - DRIVER " str)
#else
#define KGSL_BOOT_MARKER(str) pr_info("boot_kpi: M - DRIVER " str)
#endif
/*
 * --- kgsl drawobj flags ---
 * These flags are the same as the --- cmdbatch flags ---
 * but renamed to reflect that cmdbatch has been renamed to drawobj.
 */
#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
#define KGSL_DRAWOBJ_START_RECURRING KGSL_CMDBATCH_START_RECURRING
#define KGSL_DRAWOBJ_STOP_RECURRING KGSL_CMDBATCH_STOP_RECURRING

#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
/* The number of memstore arrays limits the number of contexts allowed.
 * If more contexts are needed, increase the page multiplier in
 * KGSL_MEMSTORE_SIZE.
 */
#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 8))
#define KGSL_MEMSTORE_GLOBAL (0)
#define KGSL_PRIORITY_MAX_RB_LEVELS 4
#define KGSL_LPAC_RB_ID KGSL_PRIORITY_MAX_RB_LEVELS
/* Subtract one for LPAC */
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
        sizeof(struct kgsl_devmemstore) - 2 - KGSL_PRIORITY_MAX_RB_LEVELS)
#define KGSL_MAX_CONTEXTS_PER_PROC 200

#define MEMSTORE_RB_OFFSET(rb, field) \
        KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)

#define MEMSTORE_ID_GPU_ADDR(dev, iter, field) \
        ((dev)->memstore->gpuaddr + KGSL_MEMSTORE_OFFSET(iter, field))

#define MEMSTORE_RB_GPU_ADDR(dev, rb, field) \
        ((dev)->memstore->gpuaddr + \
         KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field))

#define KGSL_CONTEXT_PRIORITY_HIGH 0

/* Last context id is reserved for global context */
#define KGSL_GLOBAL_CTXT_ID (KGSL_MEMSTORE_MAX - 1)
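/*
 * Illustrative sketch: the memstore region holds one struct kgsl_devmemstore
 * slot per context, followed by one slot per ringbuffer (hence the
 * rb->id + KGSL_MEMSTORE_MAX indexing above). Assuming a valid device
 * 'device' and ringbuffer 'rb', the GPU address of that ringbuffer's
 * end-of-pipe timestamp slot would be computed roughly as:
 *
 *      u64 addr = MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp);
 *
 * i.e. device->memstore->gpuaddr plus the offset of the eoptimestamp field
 * in the (rb->id + KGSL_MEMSTORE_MAX)'th slot.
 */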
/*
 * SCRATCH MEMORY: The scratch memory is one page worth of data that
 * is mapped into the GPU. This allows for some 'shared' data between
 * the GPU and CPU. For example, it will be used by the GPU to write
 * each updated RPTR for each RB.
 */

/* Shadow global helpers */
struct adreno_rb_shadow {
        /** @rptr: per ringbuffer address where GPU writes the rptr */
        u32 rptr;
        /** @bv_rptr: per ringbuffer address where GPU writes BV rptr */
        u32 bv_rptr;
        /** @bv_ts: per ringbuffer address where BV ringbuffer timestamp is written to */
        u32 bv_ts;
        /** @current_rb_ptname: The current pagetable active on the given RB */
        u32 current_rb_ptname;
        /** @ttbr0: value to program into TTBR0 during pagetable switch */
        u64 ttbr0;
        /** @contextidr: value to program into CONTEXTIDR during pagetable switch */
        u32 contextidr;
};

/**
 * struct gpu_work_period - App specific GPU work period stats
 */
struct gpu_work_period {
        struct kref refcount;
        struct list_head list;
        /** @uid: application unique identifier */
        uid_t uid;
        /** @active: Total amount of time the GPU spent running work */
        u64 active;
        /** @cmds: Total number of commands completed within work period */
        u32 cmds;
        /** @frames: Total number of frames completed within work period */
        atomic_t frames;
        /** @flags: Flags to accumulate GPU busy stats */
        unsigned long flags;
        /** @active_cmds: The number of active cmds from application */
        atomic_t active_cmds;
        /** @defer_ws: Work struct to clear gpu work period */
        struct work_struct defer_ws;
};

#define SCRATCH_RB_OFFSET(id, _field) ((id * sizeof(struct adreno_rb_shadow)) + \
        offsetof(struct adreno_rb_shadow, _field))
#define SCRATCH_RB_GPU_ADDR(dev, id, _field) \
        ((dev)->scratch->gpuaddr + SCRATCH_RB_OFFSET(id, _field))

/* OFFSET to KMD postamble packets in scratch buffer */
#define SCRATCH_POSTAMBLE_OFFSET (100 * sizeof(u64))
#define SCRATCH_POSTAMBLE_ADDR(dev) \
        ((dev)->scratch->gpuaddr + SCRATCH_POSTAMBLE_OFFSET)
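/*
 * Illustrative sketch: SCRATCH_RB_OFFSET() lays struct adreno_rb_shadow
 * records out back to back at the start of the scratch page, so, assuming a
 * valid device 'device', the GPU address where ringbuffer 1 shadows its read
 * pointer would be:
 *
 *      u64 addr = SCRATCH_RB_GPU_ADDR(device, 1, rptr);
 *
 * i.e. device->scratch->gpuaddr + 1 * sizeof(struct adreno_rb_shadow) +
 * offsetof(struct adreno_rb_shadow, rptr).
 */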
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000

/*
 * A helper for memory statistics - add the new size to the stat and if
 * the statistic is greater than _max, set _max
 */
static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
                atomic_long_t *max)
{
        uint64_t ret = atomic_long_add_return(size, stat);

        if (ret > atomic_long_read(max))
                atomic_long_set(max, ret);
}
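/*
 * Usage sketch: accounting a freshly allocated page against the driver-wide
 * statistics declared in struct kgsl_driver below would look roughly like:
 *
 *      KGSL_STATS_ADD(PAGE_SIZE, &kgsl_driver.stats.page_alloc,
 *                      &kgsl_driver.stats.page_alloc_max);
 *
 * The running total is updated atomically and the matching *_max value is
 * raised whenever the new total exceeds it.
 */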
#define KGSL_MAX_NUMIBS 2000
#define KGSL_MAX_SYNCPOINTS 32

struct kgsl_device;
struct kgsl_context;

/**
 * struct kgsl_driver - main container for global KGSL things
 * @cdev: Character device struct
 * @major: Major ID for the KGSL device
 * @class: Pointer to the class struct for the core KGSL sysfs entries
 * @virtdev: Virtual device for managing the core
 * @ptkobj: kobject for storing the pagetable statistics
 * @prockobj: kobject for storing the process statistics
 * @devp: Array of pointers to the individual KGSL device structs
 * @process_list: List of open processes
 * @pagetable_list: List of open pagetables
 * @ptlock: Lock for accessing the pagetable list
 * @process_mutex: Mutex for accessing the process list
 * @proclist_lock: Lock for accessing the process list
 * @devlock: Mutex protecting the device list
 * @stats: Struct containing atomic memory statistics
 * @full_cache_threshold: the threshold that triggers a full cache flush
 * @workqueue: Pointer to a single threaded workqueue
 */
struct kgsl_driver {
        struct cdev cdev;
        dev_t major;
        struct class *class;
        struct device virtdev;
        struct kobject *ptkobj;
        struct kobject *prockobj;
        struct kgsl_device *devp[1];
        struct list_head process_list;
        /** @wp_list: List of work period allocated per uid */
        struct list_head wp_list;
        /** @wp_list_lock: Lock for accessing the work period list */
        spinlock_t wp_list_lock;
        struct list_head pagetable_list;
        spinlock_t ptlock;
        struct mutex process_mutex;
        rwlock_t proclist_lock;
        struct mutex devlock;
        struct {
                atomic_long_t vmalloc;
                atomic_long_t vmalloc_max;
                atomic_long_t page_alloc;
                atomic_long_t page_alloc_max;
                atomic_long_t coherent;
                atomic_long_t coherent_max;
                atomic_long_t secure;
                atomic_long_t secure_max;
                atomic_long_t mapped;
                atomic_long_t mapped_max;
        } stats;
        unsigned int full_cache_threshold;
        struct workqueue_struct *workqueue;
        /** @lockless_workqueue: Pointer to a workqueue handler which doesn't hold device mutex */
        struct workqueue_struct *lockless_workqueue;
};
extern struct kgsl_driver kgsl_driver;

struct kgsl_pagetable;
struct kgsl_memdesc;

struct kgsl_memdesc_ops {
        unsigned int vmflags;
        vm_fault_t (*vmfault)(struct kgsl_memdesc *memdesc,
                struct vm_area_struct *vma, struct vm_fault *vmf);
        void (*free)(struct kgsl_memdesc *memdesc);
        int (*map_kernel)(struct kgsl_memdesc *memdesc);
        void (*unmap_kernel)(struct kgsl_memdesc *memdesc);
        /**
         * @put_gpuaddr: Put away the GPU address and unmap the memory
         * descriptor
         */
        void (*put_gpuaddr)(struct kgsl_memdesc *memdesc);
};

/* Internal definitions for memdesc->priv */
#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
/* Set if the memdesc is mapped into all pagetables */
#define KGSL_MEMDESC_GLOBAL BIT(1)
/* The memdesc is frozen during a snapshot */
#define KGSL_MEMDESC_FROZEN BIT(2)
/* The memdesc is mapped into a pagetable */
#define KGSL_MEMDESC_MAPPED BIT(3)
/* The memdesc is secured for content protection */
#define KGSL_MEMDESC_SECURE BIT(4)
/* Memory is accessible in privileged mode */
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* This is an instruction buffer */
#define KGSL_MEMDESC_UCODE BIT(7)
/* For global buffers, randomly assign an address from the region */
#define KGSL_MEMDESC_RANDOM BIT(8)
/* Allocate memory from the system instead of the pools */
#define KGSL_MEMDESC_SYSMEM BIT(9)
/* The memdesc pages can be reclaimed */
#define KGSL_MEMDESC_CAN_RECLAIM BIT(10)
/* The memdesc pages were reclaimed */
#define KGSL_MEMDESC_RECLAIMED BIT(11)
/* Skip reclaim of the memdesc pages */
#define KGSL_MEMDESC_SKIP_RECLAIM BIT(12)
/* The memdesc is hypassigned to HLOS */
#define KGSL_MEMDESC_HYPASSIGNED_HLOS BIT(13)
/**
 * struct kgsl_memdesc - GPU memory object descriptor
 * @pagetable: Pointer to the pagetable that the object is mapped in
 * @hostptr: Kernel virtual address
 * @hostptr_count: Number of threads using hostptr
 * @gpuaddr: GPU virtual address
 * @physaddr: Physical address of the memory object
 * @size: Size of the memory object
 * @priv: Internal flags and settings
 * @sgt: Scatter gather table for allocated pages
 * @ops: Function hooks for the memdesc memory type
 * @flags: Flags set from userspace
 * @dev: Pointer to the struct device that owns this memory
 * @attrs: dma attributes for this memory
 * @pages: An array of pointers to allocated pages
 * @page_count: Total number of pages allocated
 */
struct kgsl_memdesc {
        struct kgsl_pagetable *pagetable;
        void *hostptr;
        unsigned int hostptr_count;
        uint64_t gpuaddr;
        phys_addr_t physaddr;
        uint64_t size;
        unsigned int priv;
        struct sg_table *sgt;
        const struct kgsl_memdesc_ops *ops;
        uint64_t flags;
        struct device *dev;
        unsigned long attrs;
        struct page **pages;
        unsigned int page_count;
        /**
         * @lock: Spinlock to protect the gpuaddr from being accessed by
         * multiple entities trying to map the same SVM region at once
         */
        spinlock_t lock;
        /** @shmem_filp: Pointer to the shmem file backing this memdesc */
        struct file *shmem_filp;
        /** @ranges: rbtree base for the interval list of vbo ranges */
        struct rb_root_cached ranges;
        /** @ranges_lock: Mutex to protect the range database */
        struct mutex ranges_lock;
        /** @gmuaddr: GMU VA if this is mapped in GMU */
        u32 gmuaddr;
        /** @shmem_page_list: shmem pages list */
        struct list_head shmem_page_list;
};
/**
 * struct kgsl_global_memdesc - wrapper for global memory objects
 */
struct kgsl_global_memdesc {
        /** @memdesc: Container for the GPU memory descriptor for the object */
        struct kgsl_memdesc memdesc;
        /** @name: Name of the object for the debugfs list */
        const char *name;
        /** @node: List node for the list of global objects */
        struct list_head node;
};

/*
 * List of different memory entry types. The usermem enum
 * starts at 0, which we use for allocated memory, so 1 is
 * added to the enum values.
 */
#define KGSL_MEM_ENTRY_KERNEL 0
#define KGSL_MEM_ENTRY_USER (KGSL_USER_MEM_TYPE_ADDR + 1)
#define KGSL_MEM_ENTRY_ION (KGSL_USER_MEM_TYPE_ION + 1)
#define KGSL_MEM_ENTRY_MAX (KGSL_USER_MEM_TYPE_MAX + 1)

/* For application specific GPU work period stats */
#define KGSL_WORK_PERIOD 0
/* GPU work period time in msec to emulate application work stats */
#define KGSL_WORK_PERIOD_MS 900
/* symbolic table for trace and debugfs */

/*
 * struct kgsl_mem_entry - a userspace memory allocation
 * @refcount: reference count. Currently userspace can only
 * hold a single reference count, but the kernel may hold more.
 * @memdesc: description of the memory
 * @priv_data: type-specific data, such as the dma-buf attachment pointer.
 * @node: rb_node for the gpu address lookup rb tree
 * @id: idr index for this entry, can be used to find memory that does not have
 * a valid GPU address.
 * @priv: back pointer to the process that owns this memory
 * @pending_free: if !0, userspace requested that this memory be freed, but
 * there are still references to it.
 * @dev_priv: back pointer to the device file that created this entry.
 * @metadata: String containing user specified metadata for the entry
 * @work: Work struct used to schedule kgsl_mem_entry_destroy()
 */
struct kgsl_mem_entry {
        struct kref refcount;
        struct kgsl_memdesc memdesc;
        void *priv_data;
        struct rb_node node;
        unsigned int id;
        struct kgsl_process_private *priv;
        int pending_free;
        char metadata[KGSL_GPUOBJ_ALLOC_METADATA_MAX + 1];
        struct work_struct work;
        /**
         * @map_count: Count how many vmas this object is mapped in - used for
         * debugfs accounting
         */
        atomic_t map_count;
        /** @vbo_count: Count how many VBO ranges this entry is mapped in */
        atomic_t vbo_count;
};

struct kgsl_device_private;
struct kgsl_event_group;

typedef void (*kgsl_event_func)(struct kgsl_device *, struct kgsl_event_group *,
                void *, int);
/**
 * struct kgsl_event - KGSL GPU timestamp event
 * @device: Pointer to the KGSL device that owns the event
 * @context: Pointer to the context that owns the event
 * @timestamp: Timestamp for the event to expire
 * @func: Callback function for the event when it expires
 * @priv: Private data passed to the callback function
 * @node: List node for the kgsl_event_group list
 * @created: Jiffies when the event was created
 * @work: kthread_work struct for dispatching the callback
 * @result: KGSL event result type to pass to the callback
 * @group: The event group this event belongs to
 */
struct kgsl_event {
        struct kgsl_device *device;
        struct kgsl_context *context;
        unsigned int timestamp;
        kgsl_event_func func;
        void *priv;
        struct list_head node;
        unsigned int created;
        struct kthread_work work;
        int result;
        struct kgsl_event_group *group;
};
typedef int (*readtimestamp_func)(struct kgsl_device *, void *,
                enum kgsl_timestamp_type, unsigned int *);

/**
 * struct kgsl_event_group - A list of GPU events
 * @context: Pointer to the active context for the events
 * @lock: Spinlock for protecting the list
 * @events: List of active GPU events
 * @group: Node for the master group list
 * @processed: Last processed timestamp
 * @name: String name for the group (for the debugfs file)
 * @readtimestamp: Function pointer to read a timestamp
 * @priv: Priv member to pass to the readtimestamp function
 */
struct kgsl_event_group {
        struct kgsl_context *context;
        spinlock_t lock;
        struct list_head events;
        struct list_head group;
        unsigned int processed;
        char name[64];
        readtimestamp_func readtimestamp;
        void *priv;
};
/**
 * struct submission_info - Container for submission statistics
 * @inflight: Number of commands that are inflight
 * @rb_id: id of the ringbuffer to which this submission is made
 * @rptr: Read pointer of the ringbuffer
 * @wptr: Write pointer of the ringbuffer
 * @gmu_dispatch_queue: GMU dispatch queue to which this submission is made
 */
struct submission_info {
        int inflight;
        u32 rb_id;
        u32 rptr;
        u32 wptr;
        u32 gmu_dispatch_queue;
};
/**
 * struct retire_info - Container for retire statistics
 * @inflight: Number of commands that are inflight
 * @rb_id: id of the ringbuffer to which this submission is made
 * @rptr: Read pointer of the ringbuffer
 * @wptr: Write pointer of the ringbuffer
 * @gmu_dispatch_queue: GMU dispatch queue to which this submission is made
 * @timestamp: Timestamp of submission that retired
 * @submitted_to_rb: AO ticks when GMU put this submission on ringbuffer
 * @sop: AO ticks when GPU started processing this submission
 * @eop: AO ticks when GPU finished this submission
 * @retired_on_gmu: AO ticks when GMU retired this submission
 * @active: Number of AO ticks taken by GPU to complete the command
 */
struct retire_info {
        int inflight;
        int rb_id;
        u32 rptr;
        u32 wptr;
        u32 gmu_dispatch_queue;
        u32 timestamp;
        u64 submitted_to_rb;
        u64 sop;
        u64 eop;
        u64 retired_on_gmu;
        u64 active;
};
long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_sync(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpuobj_set_info(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpumem_bind_ranges(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_gpu_aux_command(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timeline_create(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timeline_wait(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timeline_query(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timeline_fence_get(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timeline_signal(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_timeline_destroy(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_get_fault_report(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
long kgsl_ioctl_recurring_command(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data);
void kgsl_mem_entry_destroy(struct kref *kref);
void kgsl_mem_entry_destroy_deferred(struct kref *kref);
void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
                int *egl_surface_count, int *egl_image_count);
unsigned long kgsl_get_dmabuf_inode_number(struct kgsl_mem_entry *entry);

struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id);

struct kgsl_mem_entry *gpumem_alloc_entry(struct kgsl_device_private *dev_priv,
                uint64_t size, uint64_t flags);
long gpumem_free_entry(struct kgsl_mem_entry *entry);

enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device);

/* Helper functions */
int kgsl_request_irq(struct platform_device *pdev, const char *name,
                irq_handler_t handler, void *data);
int kgsl_request_irq_optional(struct platform_device *pdev, const char *name,
                irq_handler_t handler, void *data);

int __init kgsl_core_init(void);
void kgsl_core_exit(void);
static inline bool kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
                uint64_t gpuaddr, uint64_t size)
{
        if (IS_ERR_OR_NULL(memdesc))
                return false;

        /* set a minimum size to search for */
        if (!size)
                size = 1;

        /* don't overflow */
        if (size > U64_MAX - gpuaddr)
                return false;

        return (gpuaddr >= memdesc->gpuaddr &&
                ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size)));
}
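/*
 * Illustrative example: for a memdesc with gpuaddr = 0x1000 and
 * size = 0x2000, kgsl_gpuaddr_in_memdesc(md, 0x1800, 0x800) returns true
 * because [0x1800, 0x2000) fits entirely inside [0x1000, 0x3000), while
 * kgsl_gpuaddr_in_memdesc(md, 0x2c00, 0x800) returns false because the
 * range would extend past the end of the allocation.
 */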
static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
{
        if (memdesc->ops && memdesc->ops->map_kernel)
                memdesc->ops->map_kernel(memdesc);

        return memdesc->hostptr;
}

static inline void kgsl_memdesc_unmap(struct kgsl_memdesc *memdesc)
{
        if (memdesc->ops && memdesc->ops->unmap_kernel)
                memdesc->ops->unmap_kernel(memdesc);
}
static inline void *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
                uint64_t gpuaddr)
{
        void *hostptr = NULL;

        if ((gpuaddr >= memdesc->gpuaddr) &&
                (gpuaddr < (memdesc->gpuaddr + memdesc->size)))
                hostptr = kgsl_memdesc_map(memdesc);

        return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL;
}
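/*
 * Usage sketch: a successful kgsl_gpuaddr_to_vaddr() takes a kernel mapping
 * via kgsl_memdesc_map(), so callers are expected to balance it with
 * kgsl_memdesc_unmap() once they are done with the pointer:
 *
 *      u32 *ptr = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);
 *      if (ptr) {
 *              val = *ptr;
 *              kgsl_memdesc_unmap(memdesc);
 *      }
 */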
static inline int timestamp_cmp(unsigned int a, unsigned int b)
{
        /* check for equal */
        if (a == b)
                return 0;

        /* check for greater-than for non-rollover case */
        if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW))
                return 1;

        /* check for greater-than for rollover case
         * note that <= is required to ensure that consistent
         * results are returned for values whose difference is
         * equal to the window size
         */
        a += KGSL_TIMESTAMP_WINDOW;
        b += KGSL_TIMESTAMP_WINDOW;
        return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
}
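/*
 * Worked example: with a = 0x00000005 and b = 0xfffffff0 the non-rollover
 * test fails (a < b), but after adding KGSL_TIMESTAMP_WINDOW the values
 * become 0x80000005 and 0x7ffffff0, so the function returns 1: timestamp 5
 * is treated as newer than 0xfffffff0 because the 32-bit counter is assumed
 * to have rolled over.
 */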
/**
 * kgsl_schedule_work() - Schedule a work item on the KGSL workqueue
 * @work: work item to schedule
 */
static inline void kgsl_schedule_work(struct work_struct *work)
{
        queue_work(kgsl_driver.workqueue, work);
}
static inline struct kgsl_mem_entry *
kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
{
        if (!IS_ERR_OR_NULL(entry) && kref_get_unless_zero(&entry->refcount))
                return entry;

        return NULL;
}

static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
        if (!IS_ERR_OR_NULL(entry))
                kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
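/*
 * Usage sketch: lookup helpers such as kgsl_sharedmem_find() return an entry
 * with a reference already held, so the typical pattern is to use the entry
 * and then drop that reference:
 *
 *      entry = kgsl_sharedmem_find(private, gpuaddr);
 *      if (entry) {
 *              ... use entry->memdesc ...
 *              kgsl_mem_entry_put(entry);
 *      }
 *
 * kgsl_mem_entry_destroy() runs only when the last reference is dropped.
 */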
/*
 * kgsl_mem_entry_put_deferred() - Put a refcount and trigger deferred
 * mem_entry destroy when the last refcount is dropped.
 * @entry: memory entry to be put.
 *
 * Use this to put a memory entry when we don't want to block
 * the caller while destroying the memory entry.
 */
static inline void kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
{
        if (entry)
                kref_put(&entry->refcount, kgsl_mem_entry_destroy_deferred);
}
/*
 * kgsl_addr_range_overlap() - Checks if 2 ranges overlap
 * @gpuaddr1: Start of first address range
 * @size1: Size of first address range
 * @gpuaddr2: Start of second address range
 * @size2: Size of second address range
 *
 * Returns true if the two given address ranges overlap, otherwise false.
 */
static inline bool kgsl_addr_range_overlap(uint64_t gpuaddr1,
                uint64_t size1, uint64_t gpuaddr2, uint64_t size2)
{
        if ((size1 > (U64_MAX - gpuaddr1)) || (size2 > (U64_MAX - gpuaddr2)))
                return false;

        return !(((gpuaddr1 + size1) <= gpuaddr2) ||
                (gpuaddr1 >= (gpuaddr2 + size2)));
}
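/*
 * Illustrative example: the ranges [0x1000, 0x2000) and [0x1800, 0x2800)
 * overlap, so kgsl_addr_range_overlap(0x1000, 0x1000, 0x1800, 0x1000)
 * returns true, while the merely adjacent ranges [0x1000, 0x2000) and
 * [0x2000, 0x3000) do not, so kgsl_addr_range_overlap(0x1000, 0x1000,
 * 0x2000, 0x1000) returns false.
 */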
/**
 * kgsl_work_period_update() - To update application work period stats
 * @device: Pointer to the KGSL device
 * @period: GPU work period stats
 * @active: Command active time
 */
void kgsl_work_period_update(struct kgsl_device *device,
                struct gpu_work_period *period, u64 active);

#endif /* __KGSL_H */