/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include <linux/pm_qos.h>

#include <drm/ttm/ttm_device.h>

#include "display/intel_display.h"
#include "display/intel_display_core.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
#include "i915_perf_types.h"
#include "i915_scheduler.h"
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"

struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
struct drm_i915_private;
struct intel_connector;
struct intel_dp;
struct intel_encoder;
struct intel_limit;
struct intel_overlay_error_state;
struct vlv_s0ix_state;

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u16 saveGCDGMBUS;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/*
	 * Shortcut for the stolen region. This points to either
	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
	 * support stolen.
	 */
	struct intel_memory_region *stolen_region;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destructions. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

#ifdef CONFIG_MMU_NOTIFIER
	/**
	 * notifier_lock for mmu notifiers, memory may not be allocated
	 * while holding this lock.
	 */
	rwlock_t notifier_lock;
#endif

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
					 u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return i915_fence_context_timeout(i915, U64_MAX);
}

#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
	u32 *initial_mmio;
	u8 *initial_cfg_space;
	struct list_head entry;
};

struct i915_selftest_stash {
	atomic_t counter;
	struct ida mock_region_instances;
};

struct drm_i915_private {
	struct drm_device drm;

	struct intel_display display;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * off-limits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size; /* Total size minus reserved ranges */

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct pci_dev *bridge_dev;

	struct rb_root uabi_engines;
	unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	bool preserve_bios_swizzle;

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* pm private clock gating functions */
	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long gem_quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */
	struct list_head global_obj_list;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * HTI (aka HDPORT) state read during initial hw readout. Most
	 * platforms don't have HTI, so this will just stay 0. Those that do
	 * will use this later to figure out which PLLs and PHYs are unavailable
	 * for driver usage.
	 */
	u32 hti_state;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_gpu_error gpu_error;

	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt0;

	/*
	 * i915->gt[0] == &i915->gt0
	 */
#define I915_MAX_GT 4
	struct intel_gt *gt[I915_MAX_GT];

	struct kobject *sysfs_gt;

	/* Quick lookup of media GT (current platforms only have one) */
	struct intel_gt *media_gt;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	u8 pch_ssc_use;

	/* For i915gm/i945gm vblank irq workaround */
	u8 vblank_enabled;

	bool irq_enabled;

	/*
	 * DG2: Mask of PHYs that were not calibrated by the firmware
	 * and should not be used.
	 */
	u8 snps_phy_failed_calibration;

	struct i915_pmu pmu;

	struct i915_drm_clients clients;

	/* The TTM device structure. */
	struct ttm_device bdev;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt0;
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
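
/*
 * Illustrative usage of the iterators above (a minimal sketch, not taken
 * from this header): walk every user-visible engine, or only the video
 * decode engines of one GT, without open-coding the rbtree or mask logic.
 *
 *	struct intel_engine_cs *engine;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_uabi_engine(engine, i915)
 *		drm_dbg(&i915->drm, "uabi engine %s\n", engine->name);
 *
 *	for_each_engine_masked(engine, gt, VDBOX_MASK(gt), tmp)
 *		drm_dbg(&gt->i915->drm, "vcs engine %s\n", engine->name);
 */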
#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)

#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver)
#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
				       RUNTIME_INFO(i915)->graphics.ip.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver)
#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
				    RUNTIME_INFO(i915)->media.ip.rel)
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver)
#define IS_DISPLAY_VER(i915, from, until) \
	(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
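
/*
 * Illustrative usage (a sketch, not from this header): IP_VER() packs the
 * release number into the low byte, so ver.rel pairs compare as plain
 * integers, e.g. graphics IP 12.55 is IP_VER(12, 55). setup_xehp_path()
 * and setup_gen9_path() below are hypothetical helpers.
 *
 *	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
 *		setup_xehp_path(i915);
 *	else if (IS_GRAPHICS_VER(i915, 9, 11))
 *		setup_gen9_path(i915);
 */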
#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)

#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)

#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)

#define IS_DISPLAY_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
	 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))

#define IS_BASEDIE_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
	 INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
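
/*
 * Worked example of the packing used above (illustrative; assumes 32-bit
 * mask words and INTEL_SUBPLATFORM_BITS == 3): the low 3 bits of each word
 * carry subplatform flag bits for the running device, and the remaining 29
 * bits hold one platform each, so platform p lives in word p / 29 at bit
 * (p % 29) + 3. IS_PLATFORM() tests that single bit, and IS_SUBPLATFORM()
 * additionally requires the requested subplatform bit in the same word.
 */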
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)

#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
			      IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2)
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_METEORLAKE)

#define IS_METEORLAKE_M(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
#define IS_METEORLAKE_P(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
#define IS_DG2_G10(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ADLP_N(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_ADLP_RPLP(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 3)
#define IS_CML_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
			      INTEL_INFO(dev_priv)->gt == 2)

#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TGL_UY(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)

#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))

#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))

#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
	(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
	(IS_TGL_UY(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))
#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
	(IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))
#define IS_RKL_DISPLAY_STEP(p, since, until) \
	(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_DG1_GRAPHICS_STEP(p, since, until) \
	(IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
	(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_S(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
	(IS_ALDERLAKE_P(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
	(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))

/*
 * DG2 hardware steppings are a bit unusual. The hardware design was forked to
 * create three variants (G10, G11, and G12) which each have distinct
 * workaround sets. The G11 and G12 forks of the DG2 design reset the GT
 * stepping back to "A0" for their first iterations, even though they're more
 * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
 * functionality and workarounds. However the display stepping does not reset
 * in the same manner --- a specific stepping like "B0" has a consistent
 * meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
 *
 * TLDR: All GT workarounds and stepping-specific logic must be applied in
 * relation to a specific subplatform (G10/G11/G12), whereas display workarounds
 * and stepping-specific logic will be applied with a general DG2-wide stepping
 * number.
 */
#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
	(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
	(IS_DG2(__i915) && \
	 IS_DISPLAY_STEP(__i915, since, until))
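
/*
 * Illustrative usage of the DG2 stepping helpers (a sketch, not from this
 * header), matching the comment above: GT-side checks name the fork, while
 * display-side checks use the DG2-wide stepping. Both helper functions in
 * the example are hypothetical.
 *
 *	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
 *		apply_dg2_g10_a0_gt_workaround(i915);
 *
 *	if (IS_DG2_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER))
 *		apply_dg2_b0_display_workaround(i915);
 */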
#define IS_PVC_BD_STEP(__i915, since, until) \
	(IS_PONTEVECCHIO(__i915) && \
	 IS_BASEDIE_STEP(__i915, since, until))

#define IS_PVC_CT_STEP(__i915, since, until) \
	(IS_PONTEVECCHIO(__i915) && \
	 IS_GRAPHICS_STEP(__i915, since, until))

#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({ \
	unsigned int first__ = (first); \
	unsigned int count__ = (count); \
	((gt)->info.engine_mask & \
	 GENMASK(first__ + count__ - 1, first__)) >> first__; \
})

#define RCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
#define BCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
#define CCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
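
/*
 * Worked example for the mask helpers above (illustrative): if a GT exposes
 * only VCS0 and VCS2, info.engine_mask contains BIT(VCS0) | BIT(VCS2), so
 * VDBOX_MASK(gt) masks out the VCS range and shifts it down to 0b101. Every
 * HAS_ENGINE()/VDBOX_MASK()/CCS_MASK() check is therefore a pure bit test on
 * the per-GT engine mask, with no pointer chasing.
 */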
#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)

#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_4TILE(dev_priv) (INTEL_INFO(dev_priv)->has_4tile)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv) HAS_EDRAM(dev_priv)

#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
	(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
	(INTEL_INFO(dev_priv)->has_logical_ring_elsq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
})

#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
	(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
	(IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
					 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))

#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)

#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)

#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))

#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */

#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)

#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
#define HAS_HECI_PXP(dev_priv) \
	(INTEL_INFO(dev_priv)->has_heci_pxp)
#define HAS_HECI_GSCFI(dev_priv) \
	(INTEL_INFO(dev_priv)->has_heci_gscfi)
#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))

#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
/*
 * Set this flag when the platform requires 64K GTT page sizes or larger for
 * device local memory access.
 */
#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)

/*
 * Set this flag when the platform doesn't allow both 64K pages and 4K pages
 * in the same PT. This flag means we need to support a compact PT layout for
 * the ppGTT when using 64K GTT pages.
 */
#define NEEDS_COMPACT_PT(dev_priv) (INTEL_INFO(dev_priv)->needs_compact_pt)
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)

/*
 * Platform has dedicated compression control state for each lmem surface,
 * stored in lmem, to support the 3D and media compression formats.
 */
#define HAS_FLAT_CCS(dev_priv) (INTEL_INFO(dev_priv)->has_flat_ccs)

#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
			    INTEL_INFO(dev_priv)->has_pxp) && \
			   VDBOX_MASK(to_gt(dev_priv)))

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))

#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))

#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)

#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)

#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), \
	 !(dev_priv)->params.disable_display && \
	 !intel_opregion_headless_sku(dev_priv))

#define HAS_GUC_DEPRIVILEGE(dev_priv) \
	(INTEL_INFO(dev_priv)->has_guc_deprivilege)

#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
					      IS_ALDERLAKE_S(dev_priv))

#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)

#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)

#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915,
		       struct drm_i915_gem_object *obj, bool always_coherent)
{
	if (i915_gem_object_is_lmem(obj))
		return I915_MAP_WC;
	if (HAS_LLC(i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}
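
/*
 * Illustrative use of i915_coherent_map_type() (a sketch, not from this
 * header): lmem objects are always mapped WC, while system memory objects
 * get WB when the device has an LLC or the caller asks for coherency.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj,
 *			i915_coherent_map_type(i915, obj, true));
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 */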
#endif