kgsl_device.h 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #ifndef __KGSL_DEVICE_H
  7. #define __KGSL_DEVICE_H
  8. #include <linux/sched/mm.h>
  9. #include <linux/sched/task.h>
  10. #include <trace/events/gpu_mem.h>
  11. #include "kgsl.h"
  12. #include "kgsl_drawobj.h"
  13. #include "kgsl_mmu.h"
  14. #include "kgsl_regmap.h"
  15. #define KGSL_IOCTL_FUNC(_cmd, _func) \
  16. [_IOC_NR((_cmd))] = \
  17. { .cmd = (_cmd), .func = (_func) }
  18. /*
  19. * KGSL device state is initialized to INIT when platform_probe *
  20. * successfully initialized the device. Once a device has been opened *
  21. * (started) it becomes active. NAP implies that only low latency *
  22. * resources (for now clocks on some platforms) are off. SLEEP implies *
  23. * that the KGSL module believes a device is idle (has been inactive *
  24. * past its timer) and all system resources are released. SUSPEND is *
  25. * requested by the kernel and will be enforced upon all open devices. *
  26. * RESET indicates that GPU or GMU hang happens. KGSL is handling *
  27. * snapshot or recover GPU from hang. MINBW implies that DDR BW vote is *
  28. * set to non-zero minimum value.
  29. */
  30. #define KGSL_STATE_NONE 0x00000000
  31. #define KGSL_STATE_INIT 0x00000001
  32. #define KGSL_STATE_ACTIVE 0x00000002
  33. #define KGSL_STATE_NAP 0x00000004 /* Not Used */
  34. #define KGSL_STATE_SUSPEND 0x00000010
  35. #define KGSL_STATE_AWARE 0x00000020
  36. #define KGSL_STATE_SLUMBER 0x00000080
  37. #define KGSL_STATE_MINBW 0x00000100 /* Not Used */
/**
 * enum kgsl_event_results - result codes passed to an event callback when the
 * event is retired or cancelled
 * @KGSL_EVENT_RETIRED: The timestamp associated with the event retired
 * successfully
 * @KGSL_EVENT_CANCELLED: The event was cancelled before the event was fired
 */
enum kgsl_event_results {
	KGSL_EVENT_RETIRED = 1,
	KGSL_EVENT_CANCELLED = 2,
};
  49. /*
  50. * "list" of event types for ftrace symbolic magic
  51. */
  52. #define KGSL_CONTEXT_FLAGS \
  53. { KGSL_CONTEXT_NO_GMEM_ALLOC, "NO_GMEM_ALLOC" }, \
  54. { KGSL_CONTEXT_PREAMBLE, "PREAMBLE" }, \
  55. { KGSL_CONTEXT_TRASH_STATE, "TRASH_STATE" }, \
  56. { KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" }, \
  57. { KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
  58. { KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
  59. { KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
  60. { KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
  61. { KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
  62. { KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }, \
  63. { KGSL_CONTEXT_IFH_NOP, "IFH_NOP" }, \
  64. { KGSL_CONTEXT_SECURE, "SECURE" }, \
  65. { KGSL_CONTEXT_LPAC, "LPAC" }, \
  66. { KGSL_CONTEXT_NO_SNAPSHOT, "NO_SNAPSHOT" }
  67. #define KGSL_CONTEXT_ID(_context) \
  68. ((_context != NULL) ? (_context)->id : KGSL_MEMSTORE_GLOBAL)
  69. struct kgsl_device;
  70. struct platform_device;
  71. struct kgsl_device_private;
  72. struct kgsl_context;
  73. struct kgsl_power_stats;
  74. struct kgsl_event;
  75. struct kgsl_snapshot;
  76. struct kgsl_sync_fence;
/*
 * struct kgsl_functable - Per-target function hooks invoked by the KGSL core.
 * Each GPU target driver supplies one of these tables.
 */
struct kgsl_functable {
	/* Mandatory functions - these functions must be implemented
	 * by the client device. The driver will not check for a NULL
	 * pointer before calling the hook.
	 */
	int (*suspend_context)(struct kgsl_device *device);
	int (*first_open)(struct kgsl_device *device);
	int (*last_close)(struct kgsl_device *device);
	int (*start)(struct kgsl_device *device, int priority);
	int (*stop)(struct kgsl_device *device);
	int (*getproperty)(struct kgsl_device *device,
		struct kgsl_device_getproperty *param);
	int (*getproperty_compat)(struct kgsl_device *device,
		struct kgsl_device_getproperty *param);
	int (*waittimestamp)(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp,
		unsigned int msecs);
	int (*readtimestamp)(struct kgsl_device *device, void *priv,
		enum kgsl_timestamp_type type, unsigned int *timestamp);
	int (*queue_cmds)(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
		uint32_t count, uint32_t *timestamp);
	void (*power_stats)(struct kgsl_device *device,
		struct kgsl_power_stats *stats);
	void (*snapshot)(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot, struct kgsl_context *context,
		struct kgsl_context *context_lpac);
	/** @drain_and_idle: Drain the GPU and wait for it to idle */
	int (*drain_and_idle)(struct kgsl_device *device);
	struct kgsl_device_private * (*device_private_create)(void);
	void (*device_private_destroy)(struct kgsl_device_private *dev_priv);
	/*
	 * Optional functions - these functions are not mandatory. The
	 * driver will check that the function pointer is not NULL before
	 * calling the hook
	 */
	struct kgsl_context *(*drawctxt_create)
		(struct kgsl_device_private *dev_priv,
		uint32_t *flags);
	void (*drawctxt_detach)(struct kgsl_context *context);
	void (*drawctxt_destroy)(struct kgsl_context *context);
	void (*drawctxt_dump)(struct kgsl_device *device,
		struct kgsl_context *context);
	long (*ioctl)(struct kgsl_device_private *dev_priv,
		unsigned int cmd, unsigned long arg);
	long (*compat_ioctl)(struct kgsl_device_private *dev_priv,
		unsigned int cmd, unsigned long arg);
	int (*setproperty)(struct kgsl_device_private *dev_priv,
		unsigned int type, void __user *value,
		unsigned int sizebytes);
	int (*setproperty_compat)(struct kgsl_device_private *dev_priv,
		unsigned int type, void __user *value,
		unsigned int sizebytes);
	void (*drawctxt_sched)(struct kgsl_device *device,
		struct kgsl_context *context);
	void (*resume)(struct kgsl_device *device);
	int (*regulator_enable)(struct kgsl_device *device);
	bool (*is_hw_collapsible)(struct kgsl_device *device);
	void (*regulator_disable)(struct kgsl_device *device);
	void (*pwrlevel_change_settings)(struct kgsl_device *device,
		unsigned int prelevel, unsigned int postlevel, bool post);
	/**
	 * @query_property_list: query the list of properties
	 * supported by the device. If 'list' is NULL just return the total
	 * number of properties available otherwise copy up to 'count' items
	 * into the list and return the total number of items copied.
	 */
	int (*query_property_list)(struct kgsl_device *device, u32 *list,
		u32 count);
	bool (*is_hwcg_on)(struct kgsl_device *device);
	/** @gpu_clock_set: Target specific function to set gpu frequency */
	int (*gpu_clock_set)(struct kgsl_device *device, u32 pwrlevel);
	/** @gpu_bus_set: Target specific function to set gpu bandwidth */
	int (*gpu_bus_set)(struct kgsl_device *device, int bus_level, u32 ab);
	void (*deassert_gbif_halt)(struct kgsl_device *device);
	/** @queue_recurring_cmd: Queue recurring commands to GMU */
	int (*queue_recurring_cmd)(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj);
	/** @dequeue_recurring_cmd: Dequeue recurring commands from GMU */
	int (*dequeue_recurring_cmd)(struct kgsl_device *device,
		struct kgsl_context *context);
	/** @set_isdb_breakpoint_registers: Program isdb registers to issue break command */
	void (*set_isdb_breakpoint_registers)(struct kgsl_device *device);
	/** @create_hw_fence: Create a hardware fence */
	void (*create_hw_fence)(struct kgsl_device *device, struct kgsl_sync_fence *kfence);
};
/**
 * struct kgsl_ioctl - Entry in a KGSL ioctl dispatch table
 * @cmd: The ioctl command code this entry handles
 * @func: Handler called for @cmd
 */
struct kgsl_ioctl {
	unsigned int cmd;
	long (*func)(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
};
  168. long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
  169. const struct kgsl_ioctl *cmds, int len);
/* Flag to mark that the memobj_node should not go to the hardware */
#define MEMOBJ_SKIP BIT(1)
/**
 * struct kgsl_memobj_node - Memory object descriptor
 * @node: Local list node for the object
 * @id: GPU memory ID for the object
 * @offset: Offset within the object
 * @gpuaddr: GPU address for the object
 * @size: Size of the object in bytes
 * @flags: External flags passed by the user
 * @priv: Internal flags set by the driver
 */
struct kgsl_memobj_node {
	struct list_head node;
	unsigned int id;
	uint64_t offset;
	uint64_t gpuaddr;
	uint64_t size;
	unsigned long flags;
	unsigned long priv;
};
/*
 * struct kgsl_device - Per-device state for one KGSL GPU instance.
 * Field order is part of the in-kernel layout; do not reorder.
 */
struct kgsl_device {
	struct device *dev;
	const char *name;
	u32 id;
	/* Kernel virtual address for GPU shader memory */
	void __iomem *shader_mem_virt;
	/* Starting kernel virtual address for QDSS GFX DBG register block */
	void __iomem *qdss_gfx_virt;
	struct kgsl_memdesc *memstore;
	struct kgsl_memdesc *scratch;
	struct kgsl_mmu mmu;
	struct gmu_core_device gmu_core;
	struct completion hwaccess_gate;
	struct completion halt_gate;
	const struct kgsl_functable *ftbl;
	struct work_struct idle_check_ws;
	struct timer_list idle_timer;
	struct kgsl_pwrctrl pwrctrl;
	int open_count;
	/* For GPU inline submission */
	uint32_t submit_now;
	spinlock_t submit_lock;
	/** @skip_inline_submit: Track if user threads should make an inline submission or not */
	bool skip_inline_submit;
	struct mutex mutex;
	uint32_t state;
	uint32_t requested_state;
	atomic_t active_cnt;
	/** @total_mapped: To trace overall gpu memory usage */
	atomic64_t total_mapped;
	wait_queue_head_t active_cnt_wq;
	struct platform_device *pdev;
	struct dentry *d_debugfs;
	struct idr context_idr;
	rwlock_t context_lock;
	/* DMA-coherent buffer reserved for the GPU snapshot dump */
	struct {
		void *ptr;
		dma_addr_t dma_handle;
		u32 size;
	} snapshot_memory;
	struct kgsl_snapshot *snapshot;
	/** @panic_nb: notifier block to capture GPU snapshot on kernel panic */
	struct notifier_block panic_nb;
	/* Buffer used when the snapshot must be taken in atomic context */
	struct {
		void *ptr;
		u32 size;
	} snapshot_memory_atomic;
	u32 snapshot_faultcount; /* Total number of faults since boot */
	bool force_panic; /* Force panic after snapshot dump */
	bool skip_ib_capture; /* Skip IB capture after snapshot */
	bool prioritize_unrecoverable; /* Overwrite with new GMU snapshots */
	bool set_isdb_breakpoint; /* Set isdb registers before snapshot */
	bool snapshot_atomic; /* To capture snapshot in atomic context */
	/* Use CP Crash dumper to get GPU snapshot */
	bool snapshot_crashdumper;
	/* Use HOST side register reads to get GPU snapshot */
	bool snapshot_legacy;
	/* Use to dump the context record in bytes */
	u64 snapshot_ctxt_record_size;
	struct kobject snapshot_kobj;
	struct kgsl_pwrscale pwrscale;
	int reset_counter; /* Track how many GPU core resets have occurred */
	struct kthread_worker *events_worker;
	/* Number of active contexts seen globally for this device */
	int active_context_count;
	struct kobject gpu_sysfs_kobj;
	unsigned int l3_freq[3];
	unsigned int num_l3_pwrlevels;
	/* store current L3 vote to determine if we should change our vote */
	unsigned int cur_l3_pwrlevel;
	/** @globals: List of global memory objects */
	struct list_head globals;
	/** @global_map: bitmap for global memory allocations */
	unsigned long *global_map;
	/** @qdss_desc: Memory descriptor for the QDSS region if applicable */
	struct kgsl_memdesc *qdss_desc;
	/** @qtimer_desc: Memory descriptor for the QTimer region if applicable */
	struct kgsl_memdesc *qtimer_desc;
	/** @event_groups: List of event groups for this device */
	struct list_head event_groups;
	/** @event_groups_lock: A R/W lock for the events group list */
	rwlock_t event_groups_lock;
	/** @speed_bin: Speed bin for the GPU device if applicable */
	u32 speed_bin;
	/** @soc_code: Identifier containing product and feature code */
	u32 soc_code;
	/** @gmu_fault: Set when a gmu or rgmu fault is encountered */
	bool gmu_fault;
	/** @regmap: GPU register map */
	struct kgsl_regmap regmap;
	/** @timelines: Iterator for assigning IDs to timelines */
	struct idr timelines;
	/** @timelines_lock: Spinlock to protect the timelines idr */
	spinlock_t timelines_lock;
	/** @fence_trace_array: A local trace array for fence debugging */
	struct trace_array *fence_trace_array;
	/** @l3_vote: Enable/Disable l3 voting */
	bool l3_vote;
	/** @pdev_loaded: Flag to test if platform driver is probed */
	bool pdev_loaded;
	/** @nh: Pointer to head of the SRCU notifier chain */
	struct srcu_notifier_head nh;
	/** @bcl_data_kobj: Kobj for bcl_data sysfs node */
	struct kobject bcl_data_kobj;
	/** @work_period_timer: Timer to capture application GPU work stats */
	struct timer_list work_period_timer;
	/** @work_period_lock: Lock to protect application GPU work periods */
	spinlock_t work_period_lock;
	/** @work_period_ws: Work struct to emulate application GPU work events */
	struct work_struct work_period_ws;
	/** @flags: Flags for gpu_period stats */
	unsigned long flags;
	/* Start/end timestamps of the current GPU work period */
	struct {
		u64 begin;
		u64 end;
	} gpu_period;
	/** @idle_jiffies: Latest idle jiffies */
	unsigned long idle_jiffies;
	/** @dump_all_ibs: Whether to dump all ibs in snapshot */
	bool dump_all_ibs;
	/** @freq_limiter_irq_clear: reset controller to clear freq limiter irq */
	struct reset_control *freq_limiter_irq_clear;
	/** @freq_limiter_intr_num: The interrupt number for freq limiter */
	int freq_limiter_intr_num;
	/** @cx_host_irq_num: Interrupt number for cx_host_irq */
	int cx_host_irq_num;
};
  317. #define KGSL_MMU_DEVICE(_mmu) \
  318. container_of((_mmu), struct kgsl_device, mmu)
/**
 * enum bits for struct kgsl_context.priv
 * @KGSL_CONTEXT_PRIV_SUBMITTED - The context has submitted commands to gpu.
 * @KGSL_CONTEXT_PRIV_DETACHED - The context has been destroyed by userspace
 * and is no longer using the gpu.
 * @KGSL_CONTEXT_PRIV_INVALID - The context has been destroyed by the kernel
 * because it caused a GPU fault.
 * @KGSL_CONTEXT_PRIV_PAGEFAULT - The context has caused a page fault.
 * @KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC - this value and higher values are
 * reserved for device specific use.
 */
enum kgsl_context_priv {
	KGSL_CONTEXT_PRIV_SUBMITTED = 0,
	KGSL_CONTEXT_PRIV_DETACHED,
	KGSL_CONTEXT_PRIV_INVALID,
	KGSL_CONTEXT_PRIV_PAGEFAULT,
	KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC = 16,
};
  337. struct kgsl_process_private;
  338. #define KGSL_MAX_FAULT_ENTRIES 40
  339. /* Maintain faults observed within threshold time (in milliseconds) */
  340. #define KGSL_MAX_FAULT_TIME_THRESHOLD 5000
/**
 * struct kgsl_fault_node - GPU fault descriptor
 * @node: List node for list of faults
 * @type: Type of fault
 * @priv: Pointer to type specific fault
 * @time: Time when fault was observed
 */
struct kgsl_fault_node {
	struct list_head node;
	u32 type;
	void *priv;
	ktime_t time;
};
/**
 * struct kgsl_context - The context fields that are valid for a user defined
 * context
 * @refcount: kref object for reference counting the context
 * @id: integer identifier for the context
 * @priority: The context's priority to submit commands to GPU
 * @tid: task that created this context.
 * @dev_priv: pointer to the owning device instance
 * @proc_priv: pointer to process private, the process that allocated the
 * context
 * @priv: in-kernel context flags, use KGSL_CONTEXT_* values
 * @device: pointer to the KGSL device that owns this context
 * @reset_status: status indication whether a gpu reset occurred and whether
 * this context was responsible for causing it
 * @ktimeline: sync timeline used to create fences that can be signaled when a
 * sync_pt timestamp expires
 * @events: A kgsl_event_group for this context - contains the list of GPU
 * events
 * @flags: flags from userspace controlling the behavior of this context
 * @pwr_constraint: power constraint from userspace for this context
 * @l3_pwr_constraint: L3 power constraint from userspace for this context
 * @fault_count: number of times gpu hanged in last _context_throttle_time ms
 * @fault_time: time of the first gpu hang in last _context_throttle_time ms
 * @user_ctxt_record: memory descriptor used by CP to save/restore VPC data
 * across preemption
 * @total_fault_count: number of times gpu faulted in this context
 * @last_faulted_cmd_ts: last faulted command batch timestamp
 * @gmu_registered: whether context is registered with gmu or not
 */
struct kgsl_context {
	struct kref refcount;
	uint32_t id;
	uint32_t priority;
	pid_t tid;
	struct kgsl_device_private *dev_priv;
	struct kgsl_process_private *proc_priv;
	unsigned long priv;
	struct kgsl_device *device;
	unsigned int reset_status;
	struct kgsl_sync_timeline *ktimeline;
	struct kgsl_event_group events;
	unsigned int flags;
	struct kgsl_pwr_constraint pwr_constraint;
	struct kgsl_pwr_constraint l3_pwr_constraint;
	unsigned int fault_count;
	ktime_t fault_time;
	struct kgsl_mem_entry *user_ctxt_record;
	unsigned int total_fault_count;
	unsigned int last_faulted_cmd_ts;
	bool gmu_registered;
	/**
	 * @gmu_dispatch_queue: dispatch queue id to which this context will be
	 * submitted
	 */
	u32 gmu_dispatch_queue;
	/** @faults: List of @kgsl_fault_node to store fault information */
	struct list_head faults;
	/** @fault_lock: Mutex to protect faults */
	struct mutex fault_lock;
};
/* Resolve the comm of the process owning context _c, or "unknown" */
#define _context_comm(_c) \
	(((_c) && (_c)->proc_priv) ? (_c)->proc_priv->comm : "unknown")
/*
 * Print log messages with the context process name/pid:
 * [...] kgsl kgsl-3d0: kgsl-api-test[22182]:
 *
 * NOTE(review): unlike _context_comm(), pid_nr() below dereferences
 * (_c)->proc_priv->pid without a NULL check, so callers must pass a
 * context with a valid proc_priv - confirm at call sites.
 */
#define pr_context(_d, _c, fmt, args...) \
		dev_err((_d)->dev, "%s[%d]: " fmt, \
		_context_comm((_c)), \
		pid_nr((_c)->proc_priv->pid), ##args)
/**
 * struct kgsl_process_private - Private structure for a KGSL process (across
 * all devices)
 * @priv: Internal flags, use KGSL_PROCESS_* values
 * @pid: Identification structure for the task owner of the process
 * @comm: task name of the process
 * @mem_lock: Spinlock to protect the process memory lists
 * @refcount: kref object for reference counting the process
 * @mem_idr: Iterator for assigning IDs to memory allocations
 * @pagetable: Pointer to the pagetable owned by this process
 * @list: Node for the global list of KGSL processes
 * @reclaim_list: Node for the list of processes eligible for reclaim
 * @kobj: Pointer to a kobj for the sysfs directory for this process
 * @debug_root: Pointer to the debugfs root for this process
 * @stats: Memory allocation statistics for this process
 * @gpumem_mapped: KGSL memory mapped in the process address space
 * @syncsource_idr: sync sources created by this process
 * @syncsource_lock: Spinlock to protect the syncsource idr
 * @fd_count: Counter for the number of FDs for this process
 * @ctxt_count: Count for the number of contexts for this process
 * @ctxt_count_lock: Spinlock to protect ctxt_count
 * @frame_count: Count for the number of frames processed
 */
struct kgsl_process_private {
	unsigned long priv;
	struct pid *pid;
	char comm[TASK_COMM_LEN];
	spinlock_t mem_lock;
	struct kref refcount;
	struct idr mem_idr;
	struct kgsl_pagetable *pagetable;
	struct list_head list;
	struct list_head reclaim_list;
	struct kobject kobj;
	struct dentry *debug_root;
	/* Per-memory-type current and maximum allocation statistics */
	struct {
		atomic64_t cur;
		uint64_t max;
	} stats[KGSL_MEM_ENTRY_MAX];
	atomic64_t gpumem_mapped;
	struct idr syncsource_idr;
	spinlock_t syncsource_lock;
	int fd_count;
	atomic_t ctxt_count;
	spinlock_t ctxt_count_lock;
	atomic64_t frame_count;
	/**
	 * @state: state consisting KGSL_PROC_STATE and KGSL_PROC_PINNED_STATE
	 */
	unsigned long state;
	/**
	 * @unpinned_page_count: The number of pages unpinned for reclaim
	 */
	atomic_t unpinned_page_count;
	/**
	 * @fg_work: Work struct to schedule foreground work
	 */
	struct work_struct fg_work;
	/**
	 * @reclaim_lock: Mutex lock to protect KGSL_PROC_PINNED_STATE
	 */
	struct mutex reclaim_lock;
	/** @period: Stats for GPU utilization */
	struct gpu_work_period *period;
	/**
	 * @cmd_count: The number of cmds that are active for the process
	 */
	atomic_t cmd_count;
	/**
	 * @kobj_memtype: Pointer to a kobj for memtype sysfs directory for this
	 * process
	 */
	struct kobject kobj_memtype;
	/**
	 * @private_mutex: Mutex lock to protect kgsl_process_private
	 */
	struct mutex private_mutex;
	/**
	 * @cmdline: Cmdline string of the process
	 */
	char *cmdline;
};
/**
 * struct kgsl_device_private - Per-open-file-descriptor KGSL state
 * @device: The KGSL device this descriptor was opened against
 * @process_priv: The process private data for the owning process
 */
struct kgsl_device_private {
	struct kgsl_device *device;
	struct kgsl_process_private *process_priv;
};
/**
 * struct kgsl_snapshot - details for a specific snapshot instance
 * @ib1base: Active IB1 base address at the time of fault
 * @ib2base: Active IB2 base address at the time of fault
 * @ib1size: Number of DWORDS pending in IB1 at the time of fault
 * @ib2size: Number of DWORDS pending in IB2 at the time of fault
 * @ib1dumped: Active IB1 dump status to snapshot binary
 * @ib2dumped: Active IB2 dump status to snapshot binary
 * @ib1base_lpac: Active LPAC IB1 base address at the time of fault
 * @ib2base_lpac: Active LPAC IB2 base address at the time of fault
 * @ib1size_lpac: Number of DWORDS pending in LPAC IB1 at the time of fault
 * @ib2size_lpac: Number of DWORDS pending in LPAC IB2 at the time of fault
 * @ib1dumped_lpac: Active LPAC IB1 dump status to snapshot binary
 * @ib2dumped_lpac: Active LPAC IB2 dump status to snapshot binary
 * @start: Pointer to the start of the static snapshot region
 * @size: Size of the current snapshot instance
 * @ptr: Pointer to the next block of memory to write to during snapshotting
 * @remain: Bytes left in the snapshot region
 * @timestamp: Timestamp of the snapshot instance (in seconds since boot)
 * @mempool: Pointer to the memory pool for storing memory objects
 * @mempool_size: Size of the memory pool
 * @obj_list: List of frozen GPU buffers that are waiting to be dumped.
 * @cp_list: List of IB's to be dumped.
 * @work: worker to dump the frozen memory
 * @dump_gate: completion gate signaled by worker when it is finished.
 * @process: the process that caused the hang, if known.
 * @process_lpac: the LPAC process that caused the hang, if known.
 * @sysfs_read: Count of current reads via sysfs
 * @first_read: True until the snapshot read is started
 * @recovered: True if GPU was recovered after previous snapshot
 * @device: KGSL device this snapshot was taken from
 */
struct kgsl_snapshot {
	uint64_t ib1base;
	uint64_t ib2base;
	unsigned int ib1size;
	unsigned int ib2size;
	bool ib1dumped;
	bool ib2dumped;
	u64 ib1base_lpac;
	u64 ib2base_lpac;
	u32 ib1size_lpac;
	u32 ib2size_lpac;
	bool ib1dumped_lpac;
	bool ib2dumped_lpac;
	u8 *start;
	size_t size;
	u8 *ptr;
	size_t remain;
	unsigned long timestamp;
	u8 *mempool;
	size_t mempool_size;
	struct list_head obj_list;
	struct list_head cp_list;
	struct work_struct work;
	struct completion dump_gate;
	struct kgsl_process_private *process;
	struct kgsl_process_private *process_lpac;
	unsigned int sysfs_read;
	bool first_read;
	bool recovered;
	struct kgsl_device *device;
};
/**
 * struct kgsl_snapshot_object - GPU memory in the snapshot
 * @gpuaddr: The GPU address identified during snapshot
 * @size: The buffer size identified during snapshot
 * @offset: offset from start of the allocated kgsl_mem_entry
 * @type: SNAPSHOT_OBJ_TYPE_* identifier.
 * @entry: the reference counted memory entry for this buffer
 * @node: node for kgsl_snapshot.obj_list
 */
struct kgsl_snapshot_object {
	uint64_t gpuaddr;
	uint64_t size;
	uint64_t offset;
	int type;
	struct kgsl_mem_entry *entry;
	struct list_head node;
};
  578. struct kgsl_device *kgsl_get_device(int dev_idx);
  579. static inline void kgsl_regread(struct kgsl_device *device,
  580. unsigned int offsetwords,
  581. unsigned int *value)
  582. {
  583. *value = kgsl_regmap_read(&device->regmap, offsetwords);
  584. }
  585. static inline void kgsl_regread64(struct kgsl_device *device,
  586. u32 offsetwords_lo, u32 offsetwords_hi,
  587. u64 *value)
  588. {
  589. u32 val_lo = 0, val_hi = 0;
  590. val_lo = kgsl_regmap_read(&device->regmap, offsetwords_lo);
  591. val_hi = kgsl_regmap_read(&device->regmap, offsetwords_hi);
  592. *value = (((u64)val_hi << 32) | val_lo);
  593. }
  594. static inline void kgsl_regwrite(struct kgsl_device *device,
  595. unsigned int offsetwords,
  596. unsigned int value)
  597. {
  598. kgsl_regmap_write(&device->regmap, value, offsetwords);
  599. }
  600. static inline void kgsl_regrmw(struct kgsl_device *device,
  601. unsigned int offsetwords,
  602. unsigned int mask, unsigned int bits)
  603. {
  604. kgsl_regmap_rmw(&device->regmap, offsetwords, mask, bits);
  605. }
  606. static inline bool kgsl_state_is_awake(struct kgsl_device *device)
  607. {
  608. return (device->state == KGSL_STATE_ACTIVE ||
  609. device->state == KGSL_STATE_AWARE);
  610. }
  611. /**
  612. * kgsl_start_idle_timer - Start the idle timer
  613. * @device: A KGSL device handle
  614. *
  615. * Start the idle timer to expire in 'interval_timeout' milliseconds
  616. */
  617. static inline void kgsl_start_idle_timer(struct kgsl_device *device)
  618. {
  619. device->idle_jiffies = jiffies + msecs_to_jiffies(device->pwrctrl.interval_timeout);
  620. mod_timer(&device->idle_timer, device->idle_jiffies);
  621. }
  622. int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
  623. enum kgsl_timestamp_type type, unsigned int *timestamp);
  624. bool kgsl_check_timestamp(struct kgsl_device *device,
  625. struct kgsl_context *context, unsigned int timestamp);
  626. int kgsl_device_platform_probe(struct kgsl_device *device);
  627. void kgsl_device_platform_remove(struct kgsl_device *device);
  628. const char *kgsl_pwrstate_to_str(unsigned int state);
  629. /**
  630. * kgsl_device_snapshot_probe - add resources for the device GPU snapshot
  631. * @device: The device to initialize
  632. * @size: The size of the static region to allocate
  633. *
  634. * Allocate memory for a GPU snapshot for the specified device,
  635. * and create the sysfs files to manage it
  636. */
  637. void kgsl_device_snapshot_probe(struct kgsl_device *device, u32 size);
  638. void kgsl_device_snapshot(struct kgsl_device *device,
  639. struct kgsl_context *context, struct kgsl_context *context_lpac,
  640. bool gmu_fault);
  641. void kgsl_device_snapshot_close(struct kgsl_device *device);
  642. void kgsl_events_init(void);
  643. void kgsl_events_exit(void);
  644. /**
  645. * kgsl_device_events_probe - Set up events for the KGSL device
  646. * @device: A KGSL GPU device handle
  647. *
  648. * Set up the list and lock for GPU events for this device
  649. */
  650. void kgsl_device_events_probe(struct kgsl_device *device);
  651. /**
  652. * kgsl_device_events_remove - Remove all event groups from the KGSL device
  653. * @device: A KGSL GPU device handle
  654. *
  655. * Remove all of the GPU event groups from the device and warn if any of them
  656. * still have events pending
  657. */
  658. void kgsl_device_events_remove(struct kgsl_device *device);
  659. void kgsl_context_detach(struct kgsl_context *context);
  660. /**
  661. * kgsl_del_event_group - Remove a GPU event group from a device
  662. * @device: A KGSL GPU device handle
  663. * @group: Event group to be removed
  664. *
  665. * Remove the specified group from the list of event groups on @device.
  666. */
  667. void kgsl_del_event_group(struct kgsl_device *device,
  668. struct kgsl_event_group *group);
  669. /**
  670. * kgsl_add_event_group - Add a new GPU event group
  671. * @device: A KGSL GPU device handle
  672. * @group: Pointer to the new group to add to the list
  673. * @context: Context that owns the group (or NULL for global)
  674. * @readtimestamp: Function pointer to the readtimestamp function to call when
  675. * processing events
  676. * @priv: Priv member to pass to the readtimestamp function
  677. * @fmt: The format string to use to build the event name
  678. * @...: Arguments for the format string
  679. */
  680. void kgsl_add_event_group(struct kgsl_device *device,
  681. struct kgsl_event_group *group,
  682. struct kgsl_context *context, readtimestamp_func readtimestamp,
  683. void *priv, const char *fmt, ...);
/*
 * NOTE(review): the one-line summaries below are inferred from names and
 * signatures — verify against the definitions in kgsl_events.c / kgsl.c.
 */
/* Cancel pending events on @group that match @timestamp */
void kgsl_cancel_events_timestamp(struct kgsl_device *device,
struct kgsl_event_group *group, unsigned int timestamp);
/* Cancel all pending events on @group */
void kgsl_cancel_events(struct kgsl_device *device,
struct kgsl_event_group *group);
/* Cancel the specific event identified by (@timestamp, @func, @priv) */
void kgsl_cancel_event(struct kgsl_device *device,
struct kgsl_event_group *group, unsigned int timestamp,
kgsl_event_func func, void *priv);
/* Return true if the event identified by (@timestamp, @func, @priv) is pending */
bool kgsl_event_pending(struct kgsl_device *device,
struct kgsl_event_group *group, unsigned int timestamp,
kgsl_event_func func, void *priv);
/* Register @func to be called when @group reaches @timestamp */
int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
unsigned int timestamp, kgsl_event_func func, void *priv);
/* Dispatch retired events for a single group */
void kgsl_process_event_group(struct kgsl_device *device,
struct kgsl_event_group *group);
/* Flush outstanding events for a single group */
void kgsl_flush_event_group(struct kgsl_device *device,
struct kgsl_event_group *group);
/* Dispatch retired events for every event group on @device */
void kgsl_process_event_groups(struct kgsl_device *device);
/* kref release callback for struct kgsl_context (see kgsl_context_put()) */
void kgsl_context_destroy(struct kref *kref);
/* Initialize a newly allocated context owned by @dev_priv */
int kgsl_context_init(struct kgsl_device_private *dev_priv,
struct kgsl_context *context);
/* Log debug state for @context */
void kgsl_context_dump(struct kgsl_context *context);
/* Search the memfree history for a freed allocation covering @gpuaddr */
int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
uint64_t *size, uint64_t *flags, pid_t *pid);
/* Main ioctl entry point plus copy-in/copy-out helpers for ioctl payloads */
long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
unsigned long arg, unsigned char *ptr);
long kgsl_ioctl_copy_out(unsigned int kernel_cmd, unsigned int user_cmd,
unsigned long arg, unsigned char *ptr);
  712. /**
  713. * kgsl_context_type - Return a symbolic string for the context type
  714. * @type: Context type
  715. *
  716. * Return: Symbolic string representing the context type
  717. */
  718. const char *kgsl_context_type(int type);
  719. /**
  720. * kgsl_context_put() - Release context reference count
  721. * @context: Pointer to the KGSL context to be released
  722. *
  723. * Reduce the reference count on a KGSL context and destroy it if it is no
  724. * longer needed
  725. */
  726. static inline void
  727. kgsl_context_put(struct kgsl_context *context)
  728. {
  729. if (context)
  730. kref_put(&context->refcount, kgsl_context_destroy);
  731. }
  732. /**
  733. * kgsl_context_detached() - check if a context is detached
  734. * @context: the context
  735. *
  736. * Check if a context has been destroyed by userspace and is only waiting
  737. * for reference counts to go away. This check is used to weed out
  738. * contexts that shouldn't use the gpu so NULL is considered detached.
  739. */
  740. static inline bool kgsl_context_detached(struct kgsl_context *context)
  741. {
  742. return (context == NULL || test_bit(KGSL_CONTEXT_PRIV_DETACHED,
  743. &context->priv));
  744. }
  745. /**
  746. * kgsl_context_invalid() - check if a context is invalid
  747. * @context: the context
  748. *
  749. * Check if a context has been invalidated by the kernel and may no
  750. * longer use the GPU.
  751. */
  752. static inline bool kgsl_context_invalid(struct kgsl_context *context)
  753. {
  754. return (context == NULL || test_bit(KGSL_CONTEXT_PRIV_INVALID,
  755. &context->priv));
  756. }
  757. /** kgsl_context_is_bad - Check if a context is detached or invalid
  758. * @context: Pointer to a KGSL context handle
  759. *
  760. * Return: True if the context has been detached or is invalid
  761. */
  762. static inline bool kgsl_context_is_bad(struct kgsl_context *context)
  763. {
  764. return (kgsl_context_detached(context) ||
  765. kgsl_context_invalid(context));
  766. }
/**
 * kgsl_check_context_state - Check if a context is bad or invalid
 * @context: Pointer to a KGSL context handle
 *
 * Return: -EDEADLK if the context has been invalidated, -ENOENT if it has
 * been detached, or 0 if the context is still usable
 */
static inline int kgsl_check_context_state(struct kgsl_context *context)
{
if (kgsl_context_invalid(context))
return -EDEADLK;
if (kgsl_context_detached(context))
return -ENOENT;
return 0;
}
  780. /**
  781. * kgsl_context_get() - get a pointer to a KGSL context
  782. * @device: Pointer to the KGSL device that owns the context
  783. * @id: Context ID
  784. *
  785. * Find the context associated with the given ID number, increase the reference
  786. * count on it and return it. The caller must make sure that this call is
  787. * paired with a kgsl_context_put. This function is for internal use because it
  788. * doesn't validate the ownership of the context with the calling process - use
  789. * kgsl_context_get_owner for that
  790. */
  791. static inline struct kgsl_context *kgsl_context_get(struct kgsl_device *device,
  792. uint32_t id)
  793. {
  794. int result = 0;
  795. struct kgsl_context *context = NULL;
  796. read_lock(&device->context_lock);
  797. context = idr_find(&device->context_idr, id);
  798. /* Don't return a context that has been detached */
  799. if (kgsl_context_detached(context))
  800. context = NULL;
  801. else
  802. result = kref_get_unless_zero(&context->refcount);
  803. read_unlock(&device->context_lock);
  804. if (!result)
  805. return NULL;
  806. return context;
  807. }
  808. /**
  809. * _kgsl_context_get() - lightweight function to just increment the ref count
  810. * @context: Pointer to the KGSL context
  811. *
  812. * Get a reference to the specified KGSL context structure. This is a
  813. * lightweight way to just increase the refcount on a known context rather than
  814. * walking through kgsl_context_get and searching the iterator
  815. */
  816. static inline int _kgsl_context_get(struct kgsl_context *context)
  817. {
  818. int ret = 0;
  819. if (context)
  820. ret = kref_get_unless_zero(&context->refcount);
  821. return ret;
  822. }
  823. /**
  824. * kgsl_context_get_owner() - get a pointer to a KGSL context in a specific
  825. * process
  826. * @dev_priv: Pointer to the process struct
  827. * @id: Context ID to return
  828. *
  829. * Find the context associated with the given ID number, increase the reference
  830. * count on it and return it. The caller must make sure that this call is
  831. * paired with a kgsl_context_put. This function validates that the context id
  832. * given is owned by the dev_priv instancet that is passed in. See
  833. * kgsl_context_get for the internal version that doesn't do the check
  834. */
  835. static inline struct kgsl_context *kgsl_context_get_owner(
  836. struct kgsl_device_private *dev_priv, uint32_t id)
  837. {
  838. struct kgsl_context *context;
  839. context = kgsl_context_get(dev_priv->device, id);
  840. /* Verify that the context belongs to current calling fd. */
  841. if (context != NULL && context->dev_priv != dev_priv) {
  842. kgsl_context_put(context);
  843. return NULL;
  844. }
  845. return context;
  846. }
  847. /**
  848. * kgsl_process_private_get() - increment the refcount on a
  849. * kgsl_process_private struct
  850. * @process: Pointer to the KGSL process_private
  851. *
  852. * Returns 0 if the structure is invalid and a reference count could not be
  853. * obtained, nonzero otherwise.
  854. */
  855. static inline int kgsl_process_private_get(struct kgsl_process_private *process)
  856. {
  857. if (process != NULL)
  858. return kref_get_unless_zero(&process->refcount);
  859. return 0;
  860. }
/* Drop a reference taken with kgsl_process_private_get() */
void kgsl_process_private_put(struct kgsl_process_private *private);
/* Look up a process-private struct by pid; presumably takes a reference on success — TODO confirm */
struct kgsl_process_private *kgsl_process_private_find(pid_t pid);
  863. /*
  864. * A helper macro to print out "not enough memory functions" - this
  865. * makes it easy to standardize the messages as well as cut down on
  866. * the number of strings in the binary
  867. */
  868. #define SNAPSHOT_ERR_NOMEM(_d, _s) \
  869. dev_err_ratelimited((_d)->dev, \
  870. "snapshot: not enough snapshot memory for section %s\n", (_s))
  871. /**
  872. * struct kgsl_snapshot_registers - list of registers to snapshot
  873. * @regs: Pointer to an array of register ranges
  874. * @count: Number of entries in the array
  875. */
  876. struct kgsl_snapshot_registers {
  877. const unsigned int *regs;
  878. unsigned int count;
  879. };
/*
 * Snapshot section callback that writes register values into @buf;
 * NOTE(review): @priv is presumably a struct kgsl_snapshot_registers —
 * confirm at the call sites.
 */
size_t kgsl_snapshot_dump_registers(struct kgsl_device *device, u8 *buf,
size_t remain, void *priv);
/* Dump an indexed register group (@index/@data pair) into @snapshot */
void kgsl_snapshot_indexed_registers(struct kgsl_device *device,
struct kgsl_snapshot *snapshot, unsigned int index,
unsigned int data, unsigned int start, unsigned int count);
  885. /**
  886. * kgsl_snapshot_indexed_registers_v2 - Add a set of indexed registers to the
  887. * snapshot
  888. * @device: Pointer to the KGSL device being snapshotted
  889. * @snapshot: Snapshot instance
  890. * @index: Offset for the index register
  891. * @data: Offset for the data register
  892. * @start: Index to start reading
  893. * @count: Number of entries to read
  894. * @pipe_id: Pipe ID to be dumped
  895. * @slice_id: Slice ID to be dumped
  896. *
  897. * Dump the values from an indexed register group into the snapshot
  898. */
  899. void kgsl_snapshot_indexed_registers_v2(struct kgsl_device *device,
  900. struct kgsl_snapshot *snapshot, u32 index, u32 data,
  901. u32 start, u32 count, u32 pipe_id, u32 slice_id);
/*
 * NOTE(review): summaries below inferred from names/signatures — verify
 * against kgsl_snapshot.c.
 */
/* Record a GPU memory object of @process for inclusion in @snapshot */
int kgsl_snapshot_get_object(struct kgsl_snapshot *snapshot,
struct kgsl_process_private *process, uint64_t gpuaddr,
uint64_t size, unsigned int type);
/* Return nonzero if the [gpuaddr, gpuaddr+size) object is already tracked */
int kgsl_snapshot_have_object(struct kgsl_snapshot *snapshot,
struct kgsl_process_private *process,
uint64_t gpuaddr, uint64_t size);
struct adreno_ib_object_list;
/* Attach a parsed IB object list to @snapshot */
int kgsl_snapshot_add_ib_obj_list(struct kgsl_snapshot *snapshot,
struct adreno_ib_object_list *ib_obj_list);
/* Append a section with header id @id, with contents produced by @func */
void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
struct kgsl_snapshot *snapshot,
size_t (*func)(struct kgsl_device *, u8 *, size_t, void *),
void *priv);
  915. /**
  916. * kgsl_of_property_read_ddrtype - Get property from devicetree based on
  917. * the type of DDR.
  918. * @node: Devicetree node
  919. * @base: prefix string of the property
  920. * @ptr: Pointer to store the value of the property
  921. *
  922. * First look up the devicetree property based on the prefix string and DDR
  923. * type. If property is not specified per DDR type, then look for the property
  924. * based on prefix string only.
  925. *
  926. * Return: 0 on success or error code on failure.
  927. */
  928. int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
  929. u32 *ptr);
  930. /**
  931. * kgsl_query_property_list - Get a list of valid properties
  932. * @device: A KGSL device handle
  933. * @list: Pointer to a list of u32s
  934. * @count: Number of items in @list
  935. *
  936. * Populate a list with the IDs for supported properties. If @list is NULL,
  937. * just return the number of properties available, otherwise fill up to @count
  938. * items in the list with property identifiers.
  939. *
  940. * Returns the number of total properties if @list is NULL or the number of
  941. * properties copied to @list.
  942. */
  943. int kgsl_query_property_list(struct kgsl_device *device, u32 *list, u32 count);
/**
 * kgsl_mmu_has_feature - Check if an MMU feature is enabled for a device
 * @device: A KGSL GPU device handle
 * @feature: The feature bit to test
 *
 * Return: true if @feature is set in the device's MMU feature mask
 */
static inline bool kgsl_mmu_has_feature(struct kgsl_device *device,
enum kgsl_mmu_feature feature)
{
return test_bit(feature, &device->mmu.features);
}
/**
 * kgsl_mmu_set_feature - Enable an MMU feature for a device
 * @device: A KGSL GPU device handle
 * @feature: The feature bit to set in the device's MMU feature mask
 */
static inline void kgsl_mmu_set_feature(struct kgsl_device *device,
enum kgsl_mmu_feature feature)
{
set_bit(feature, &device->mmu.features);
}
  954. /**
  955. * kgsl_add_fault - Add fault information for a context
  956. * @context: Pointer to the KGSL context
  957. * @type: type of fault info
  958. * @priv: Pointer to type specific fault info
  959. *
  960. * Return: 0 on success or error code on failure.
  961. */
  962. int kgsl_add_fault(struct kgsl_context *context, u32 type, void *priv);
  963. /**
  964. * kgsl_free_faults - Free fault information for a context
  965. * @context: Pointer to the KGSL context
  966. */
  967. void kgsl_free_faults(struct kgsl_context *context);
  968. /**
  969. * kgsl_trace_gpu_mem_total - Overall gpu memory usage tracking which includes
  970. * process allocations, imported dmabufs and kgsl globals
  971. * @device: A KGSL device handle
  972. * @delta: delta of total mapped memory size
  973. */
#ifdef CONFIG_TRACE_GPU_MEM
static inline void kgsl_trace_gpu_mem_total(struct kgsl_device *device,
s64 delta)
{
u64 total_size;
/* Fold the delta into the device-wide running total of mapped memory */
total_size = atomic64_add_return(delta, &device->total_mapped);
/* NOTE(review): first two args look like gpu id / pid (0 == global) — confirm tracepoint convention */
trace_gpu_mem_total(0, 0, total_size);
}
#else
/* CONFIG_TRACE_GPU_MEM disabled: keep call sites compiling as a no-op */
static inline void kgsl_trace_gpu_mem_total(struct kgsl_device *device,
s64 delta) {}
#endif
  986. /*
  987. * kgsl_context_is_lpac() - Checks if context is LPAC
  988. * @context: KGSL context to check
  989. *
  990. * Function returns true if context is LPAC else false
  991. */
  992. static inline bool kgsl_context_is_lpac(struct kgsl_context *context)
  993. {
  994. if (context == NULL)
  995. return false;
  996. return (context->flags & KGSL_CONTEXT_LPAC) ? true : false;
  997. }
  998. #endif /* __KGSL_DEVICE_H */