/* drm_atomic_uapi.c */
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. * Copyright (C) 2018 Intel Corp.
  5. * Copyright (c) 2020, The Linux Foundation. All rights reserved.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a
  8. * copy of this software and associated documentation files (the "Software"),
  9. * to deal in the Software without restriction, including without limitation
  10. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  11. * and/or sell copies of the Software, and to permit persons to whom the
  12. * Software is furnished to do so, subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in
  15. * all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  21. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  22. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  23. * OTHER DEALINGS IN THE SOFTWARE.
  24. *
  25. * Authors:
  26. * Rob Clark <[email protected]>
  27. * Daniel Vetter <[email protected]>
  28. */
  29. #include <drm/drm_atomic_uapi.h>
  30. #include <drm/drm_atomic.h>
  31. #include <drm/drm_framebuffer.h>
  32. #include <drm/drm_print.h>
  33. #include <drm/drm_drv.h>
  34. #include <drm/drm_writeback.h>
  35. #include <drm/drm_vblank.h>
  36. #include <linux/dma-fence.h>
  37. #include <linux/uaccess.h>
  38. #include <linux/sync_file.h>
  39. #include <linux/file.h>
  40. #include "drm_crtc_internal.h"
  41. /**
  42. * DOC: overview
  43. *
  44. * This file contains the marshalling and demarshalling glue for the atomic UAPI
  45. * in all its forms: The monster ATOMIC IOCTL itself, code for GET_PROPERTY and
  46. * SET_PROPERTY IOCTLs. Plus interface functions for compatibility helpers and
  47. * drivers which have special needs to construct their own atomic updates, e.g.
  48. * for load detect or similar.
  49. */
/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 const struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	/* Drop the reference on any previously attached mode blob before
	 * installing the replacement (drm_property_blob_put(NULL) is a no-op).
	 */
	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		struct drm_property_blob *blob;

		/* Mirror the kernel mode into a UAPI blob so userspace reading
		 * the MODE_ID property sees the mode set here.
		 */
		drm_mode_convert_to_umode(&umode, mode);
		blob = drm_property_create_blob(crtc->dev,
						sizeof(umode), &umode);
		if (IS_ERR(blob))
			return PTR_ERR(blob);

		drm_mode_copy(&state->mode, mode);

		state->mode_blob = blob;
		state->enable = true;
		drm_dbg_atomic(crtc->dev,
			       "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
			       mode->name, crtc->base.id, crtc->name, state);
	} else {
		/* NULL mode: clear the stored mode and mark the CRTC disabled. */
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		drm_dbg_atomic(crtc->dev,
			       "Set [NOMODE] for [CRTC:%d:%s] state %p\n",
			       crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	struct drm_crtc *crtc = state->crtc;

	/* Same blob already installed: nothing to do. */
	if (blob == state->mode_blob)
		return 0;

	/* Release the old blob reference and clear the cached mode before
	 * attempting to validate the new blob, so a failed validation leaves
	 * the state consistently "no mode".
	 */
	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		int ret;

		/* The blob must be exactly one struct drm_mode_modeinfo. */
		if (blob->length != sizeof(struct drm_mode_modeinfo)) {
			drm_dbg_atomic(crtc->dev,
				       "[CRTC:%d:%s] bad mode blob length: %zu\n",
				       crtc->base.id, crtc->name,
				       blob->length);
			return -EINVAL;
		}

		/* Convert and validate the userspace mode into state->mode. */
		ret = drm_mode_convert_umode(crtc->dev,
					     &state->mode, blob->data);
		if (ret) {
			drm_dbg_atomic(crtc->dev,
				       "[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
				       crtc->base.id, crtc->name,
				       ret, drm_get_mode_status_name(state->mode.status));
			drm_mode_debug_printmodeline(&state->mode);
			return -EINVAL;
		}

		/* Hold a reference on behalf of the state; dropped above on
		 * the next replacement.
		 */
		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		drm_dbg_atomic(crtc->dev,
			       "Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
			       state->mode.name, crtc->base.id, crtc->name,
			       state);
	} else {
		state->enable = false;
		drm_dbg_atomic(crtc->dev,
			       "Set [NOMODE] for [CRTC:%d:%s] state %p\n",
			       crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
 * drm_atomic_set_crtc_for_plane - set CRTC for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: CRTC to use for the plane
 *
 * Changing the assigned CRTC for a plane requires us to grab the lock and state
 * for the new CRTC, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	/* Nothing to do for the same CRTC. */
	if (plane_state->crtc == crtc)
		return 0;

	/* Detach from the old CRTC first: clear this plane's bit in its
	 * plane_mask. The old CRTC's state must already be part of this
	 * atomic state (it was added when the plane was linked), hence the
	 * WARN_ON if the lookup can fail here.
	 */
	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~drm_plane_mask(plane);
	}

	plane_state->crtc = crtc;

	/* Attach to the new CRTC; this may acquire its lock and can therefore
	 * return -EDEADLK, in which case the whole atomic sequence restarts.
	 */
	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= drm_plane_mask(plane);
	}

	if (crtc)
		drm_dbg_atomic(plane->dev,
			       "Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
			       plane->base.id, plane->name, plane_state,
			       crtc->base.id, crtc->name);
	else
		drm_dbg_atomic(plane->dev,
			       "Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
			       plane->base.id, plane->name, plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
  200. /**
  201. * drm_atomic_set_fb_for_plane - set framebuffer for plane
  202. * @plane_state: atomic state object for the plane
  203. * @fb: fb to use for the plane
  204. *
  205. * Changing the assigned framebuffer for a plane requires us to grab a reference
  206. * to the new fb and drop the reference to the old fb, if there is one. This
  207. * function takes care of all these details besides updating the pointer in the
  208. * state object itself.
  209. */
  210. void
  211. drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
  212. struct drm_framebuffer *fb)
  213. {
  214. struct drm_plane *plane = plane_state->plane;
  215. if (fb)
  216. drm_dbg_atomic(plane->dev,
  217. "Set [FB:%d] for [PLANE:%d:%s] state %p\n",
  218. fb->base.id, plane->base.id, plane->name,
  219. plane_state);
  220. else
  221. drm_dbg_atomic(plane->dev,
  222. "Set [NOFB] for [PLANE:%d:%s] state %p\n",
  223. plane->base.id, plane->name, plane_state);
  224. drm_framebuffer_assign(&plane_state->fb, fb);
  225. }
  226. EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
 * drm_atomic_set_crtc_for_connector - set CRTC for connector
 * @conn_state: atomic state object for the connector
 * @crtc: CRTC to use for the connector
 *
 * Changing the assigned CRTC for a connector requires us to grab the lock and
 * state for the new CRTC, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_crtc_state *crtc_state;

	/* No change, nothing to do. */
	if (conn_state->crtc == crtc)
		return 0;

	/* Unlink from the old CRTC. Note the *new* state lookup here: the old
	 * CRTC's state was already pulled into this atomic state when the
	 * connector was linked, so no duplication (and no -EDEADLK) can occur.
	 * The connector state holds a reference on its connector while linked;
	 * drop it on unlink.
	 */
	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~drm_connector_mask(conn_state->connector);

		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	/* Link to the new CRTC; may acquire its lock and return -EDEADLK. */
	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			drm_connector_mask(conn_state->connector);

		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		drm_dbg_atomic(connector->dev,
			       "Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
			       connector->base.id, connector->name,
			       conn_state, crtc->base.id, crtc->name);
	} else {
		drm_dbg_atomic(connector->dev,
			       "Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
			       connector->base.id, connector->name,
			       conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
  278. static void set_out_fence_for_crtc(struct drm_atomic_state *state,
  279. struct drm_crtc *crtc, s32 __user *fence_ptr)
  280. {
  281. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
  282. }
  283. static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  284. struct drm_crtc *crtc)
  285. {
  286. s32 __user *fence_ptr;
  287. fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
  288. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
  289. return fence_ptr;
  290. }
  291. static int set_out_fence_for_connector(struct drm_atomic_state *state,
  292. struct drm_connector *connector,
  293. s32 __user *fence_ptr)
  294. {
  295. unsigned int index = drm_connector_index(connector);
  296. if (!fence_ptr)
  297. return 0;
  298. if (put_user(-1, fence_ptr))
  299. return -EFAULT;
  300. state->connectors[index].out_fence_ptr = fence_ptr;
  301. return 0;
  302. }
  303. static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
  304. struct drm_connector *connector)
  305. {
  306. unsigned int index = drm_connector_index(connector);
  307. s32 __user *fence_ptr;
  308. fence_ptr = state->connectors[index].out_fence_ptr;
  309. state->connectors[index].out_fence_ptr = NULL;
  310. return fence_ptr;
  311. }
  312. static int
  313. drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
  314. struct drm_property_blob **blob,
  315. uint64_t blob_id,
  316. ssize_t expected_size,
  317. ssize_t expected_elem_size,
  318. bool *replaced)
  319. {
  320. struct drm_property_blob *new_blob = NULL;
  321. if (blob_id != 0) {
  322. new_blob = drm_property_lookup_blob(dev, blob_id);
  323. if (new_blob == NULL)
  324. return -EINVAL;
  325. if (expected_size > 0 &&
  326. new_blob->length != expected_size) {
  327. drm_property_blob_put(new_blob);
  328. return -EINVAL;
  329. }
  330. if (expected_elem_size > 0 &&
  331. new_blob->length % expected_elem_size != 0) {
  332. drm_property_blob_put(new_blob);
  333. return -EINVAL;
  334. }
  335. }
  336. *replaced |= drm_property_replace_blob(blob, new_blob);
  337. drm_property_blob_put(new_blob);
  338. return 0;
  339. }
/* Apply one (property, value) pair from the ATOMIC/SET_PROPERTY uapi to a CRTC
 * state. Core properties are decoded here; anything else is forwarded to the
 * driver's atomic_set_property hook. Returns 0 on success or a negative error
 * code (-EINVAL for unknown properties or bad mode blobs, -EFAULT for bad
 * out-fence user pointers).
 */
static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		/* val is a blob id; lookup may return NULL (= disable),
		 * which drm_atomic_set_mode_prop_for_crtc() handles.
		 */
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->prop_vrr_enabled) {
		state->vrr_enabled = val;
	} else if (property == config->degamma_lut_property) {
		/* -1 size, fixed element size: any whole number of LUT entries. */
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		/* Exact size: exactly one color transformation matrix. */
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm), -1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		/* Pre-write -1 to validate the pointer before committing. */
		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (property == crtc->scaling_filter_property) {
		state->scaling_filter = val;
	} else if (crtc->funcs->atomic_set_property) {
		/* Driver-private property. */
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	} else {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
			       crtc->base.id, crtc->name,
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
/* Read back one CRTC property value for GET_PROPERTY. Blob-backed properties
 * report the blob object id (0 when unset); the write-only out-fence pointer
 * always reads as 0. Unknown properties fall through to the driver hook, then
 * -EINVAL.
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		/* Reports "effectively active", not the raw active flag. */
		*val = drm_atomic_crtc_effectively_active(state);
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->prop_vrr_enabled)
		*val = state->vrr_enabled;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (property == crtc->scaling_filter_property)
		*val = state->scaling_filter;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
/* Apply one (property, value) pair from the ATOMIC/SET_PROPERTY uapi to a
 * plane state. @file_priv scopes object lookups (fb, CRTC) to the calling
 * client. Core properties are decoded here; others are forwarded to the
 * driver's atomic_set_property hook. Returns 0 or a negative error code.
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_file *file_priv,
		struct drm_property *property, uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb;

		/* NULL fb (lookup miss or val == 0) clears the plane's fb;
		 * drm_atomic_set_fb_for_plane() takes its own reference, so
		 * drop the lookup reference afterwards.
		 */
		fb = drm_framebuffer_lookup(dev, file_priv, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		/* Only one in-fence may be set per plane per commit. */
		if (state->fence)
			return -EINVAL;

		/* -1 means "no fence". */
		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);

		/* Nonzero id that doesn't resolve: not visible to this client. */
		if (val && !crtc)
			return -EACCES;
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		/* CRTC coordinates are signed on the wire. */
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->alpha_property) {
		state->alpha = val;
	} else if (property == plane->blend_mode_property) {
		state->pixel_blend_mode = val;
	} else if (property == plane->rotation_property) {
		/* Exactly one rotation angle bit must be set. */
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
			drm_dbg_atomic(plane->dev,
				       "[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
				       plane->base.id, plane->name, val);
			return -EINVAL;
		}
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (property == plane->color_encoding_property) {
		state->color_encoding = val;
	} else if (property == plane->color_range_property) {
		state->color_range = val;
	} else if (property == config->prop_fb_damage_clips) {
		/* Any whole number of drm_rect damage clips. */
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->fb_damage_clips,
					val,
					-1,
					sizeof(struct drm_rect),
					&replaced);
		return ret;
	} else if (property == plane->scaling_filter_property) {
		state->scaling_filter = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
			       plane->base.id, plane->name,
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
/* Read back one plane property value for GET_PROPERTY. Object-backed
 * properties (fb, CRTC, damage clips) report the object id or 0; the
 * write-only in-fence fd always reads as -1. Unknown properties fall through
 * to the driver hook, then -EINVAL.
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		/* Signed CRTC coordinates go back out as u64. */
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->alpha_property) {
		*val = state->alpha;
	} else if (property == plane->blend_mode_property) {
		*val = state->pixel_blend_mode;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (property == plane->color_encoding_property) {
		*val = state->color_encoding;
	} else if (property == plane->color_range_property) {
		*val = state->color_range;
	} else if (property == config->prop_fb_damage_clips) {
		*val = (state->fb_damage_clips) ?
			state->fb_damage_clips->base.id : 0;
	} else if (property == plane->scaling_filter_property) {
		*val = state->scaling_filter;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
  567. static int drm_atomic_set_writeback_fb_for_connector(
  568. struct drm_connector_state *conn_state,
  569. struct drm_framebuffer *fb)
  570. {
  571. int ret;
  572. struct drm_connector *conn = conn_state->connector;
  573. ret = drm_writeback_set_fb(conn_state, fb);
  574. if (ret < 0)
  575. return ret;
  576. if (fb)
  577. drm_dbg_atomic(conn->dev,
  578. "Set [FB:%d] for connector state %p\n",
  579. fb->base.id, conn_state);
  580. else
  581. drm_dbg_atomic(conn->dev,
  582. "Set [NOFB] for connector state %p\n",
  583. conn_state);
  584. return 0;
  585. }
/* Apply one (property, value) pair from the ATOMIC/SET_PROPERTY uapi to a
 * connector state. @file_priv scopes object lookups (CRTC, writeback fb) to
 * the calling client. Core properties are decoded here; others are forwarded
 * to the driver's atomic_set_property hook. Returns 0 or a negative error.
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_file *file_priv,
		struct drm_property *property, uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);

		/* Nonzero id that doesn't resolve: not visible to this client. */
		if (val && !crtc)
			return -EACCES;
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us.  Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns a 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->hdr_output_metadata_property) {
		/* Exact size: exactly one hdr_output_metadata struct. */
		ret = drm_atomic_replace_property_blob_from_id(dev,
				&state->hdr_output_metadata,
				val,
				sizeof(struct hdr_output_metadata), -1,
				&replaced);
		return ret;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == config->content_type_property) {
		state->content_type = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (property == config->content_protection_property) {
		/* Userspace may request DESIRED/UNDESIRED; ENABLED is
		 * reported only by the driver once HDCP is actually up.
		 */
		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			drm_dbg_kms(dev, "only drivers can set CP Enabled\n");
			return -EINVAL;
		}
		state->content_protection = val;
	} else if (property == config->hdcp_content_type_property) {
		state->hdcp_content_type = val;
	} else if (property == connector->colorspace_property) {
		state->colorspace = val;
	} else if (property == config->writeback_fb_id_property) {
		struct drm_framebuffer *fb;
		int ret;	/* NOTE: intentionally shadows the outer ret */

		/* The writeback helper takes its own fb reference; drop the
		 * lookup reference afterwards.
		 */
		fb = drm_framebuffer_lookup(dev, file_priv, val);
		ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
		return ret;
	} else if (property == config->writeback_out_fence_ptr_property) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		return set_out_fence_for_connector(state->state, connector,
						   fence_ptr);
	} else if (property == connector->max_bpc_property) {
		state->max_requested_bpc = val;
	} else if (property == connector->privacy_screen_sw_state_property) {
		state->privacy_screen_sw_state = val;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
			       connector->base.id, connector->name,
			       property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
  694. static int
  695. drm_atomic_connector_get_property(struct drm_connector *connector,
  696. const struct drm_connector_state *state,
  697. struct drm_property *property, uint64_t *val)
  698. {
  699. struct drm_device *dev = connector->dev;
  700. struct drm_mode_config *config = &dev->mode_config;
  701. if (property == config->prop_crtc_id) {
  702. *val = (state->crtc) ? state->crtc->base.id : 0;
  703. } else if (property == config->dpms_property) {
  704. if (state->crtc && state->crtc->state->self_refresh_active)
  705. *val = DRM_MODE_DPMS_ON;
  706. else
  707. *val = connector->dpms;
  708. } else if (property == config->tv_select_subconnector_property) {
  709. *val = state->tv.subconnector;
  710. } else if (property == config->tv_left_margin_property) {
  711. *val = state->tv.margins.left;
  712. } else if (property == config->tv_right_margin_property) {
  713. *val = state->tv.margins.right;
  714. } else if (property == config->tv_top_margin_property) {
  715. *val = state->tv.margins.top;
  716. } else if (property == config->tv_bottom_margin_property) {
  717. *val = state->tv.margins.bottom;
  718. } else if (property == config->tv_mode_property) {
  719. *val = state->tv.mode;
  720. } else if (property == config->tv_brightness_property) {
  721. *val = state->tv.brightness;
  722. } else if (property == config->tv_contrast_property) {
  723. *val = state->tv.contrast;
  724. } else if (property == config->tv_flicker_reduction_property) {
  725. *val = state->tv.flicker_reduction;
  726. } else if (property == config->tv_overscan_property) {
  727. *val = state->tv.overscan;
  728. } else if (property == config->tv_saturation_property) {
  729. *val = state->tv.saturation;
  730. } else if (property == config->tv_hue_property) {
  731. *val = state->tv.hue;
  732. } else if (property == config->link_status_property) {
  733. *val = state->link_status;
  734. } else if (property == config->aspect_ratio_property) {
  735. *val = state->picture_aspect_ratio;
  736. } else if (property == config->content_type_property) {
  737. *val = state->content_type;
  738. } else if (property == connector->colorspace_property) {
  739. *val = state->colorspace;
  740. } else if (property == connector->scaling_mode_property) {
  741. *val = state->scaling_mode;
  742. } else if (property == config->hdr_output_metadata_property) {
  743. *val = state->hdr_output_metadata ?
  744. state->hdr_output_metadata->base.id : 0;
  745. } else if (property == config->content_protection_property) {
  746. *val = state->content_protection;
  747. } else if (property == config->hdcp_content_type_property) {
  748. *val = state->hdcp_content_type;
  749. } else if (property == config->writeback_fb_id_property) {
  750. /* Writeback framebuffer is one-shot, write and forget */
  751. *val = 0;
  752. } else if (property == config->writeback_out_fence_ptr_property) {
  753. *val = 0;
  754. } else if (property == connector->max_bpc_property) {
  755. *val = state->max_requested_bpc;
  756. } else if (property == connector->privacy_screen_sw_state_property) {
  757. *val = state->privacy_screen_sw_state;
  758. } else if (connector->funcs->atomic_get_property) {
  759. return connector->funcs->atomic_get_property(connector,
  760. state, property, val);
  761. } else {
  762. return -EINVAL;
  763. }
  764. return 0;
  765. }
  766. int drm_atomic_get_property(struct drm_mode_object *obj,
  767. struct drm_property *property, uint64_t *val)
  768. {
  769. struct drm_device *dev = property->dev;
  770. int ret;
  771. switch (obj->type) {
  772. case DRM_MODE_OBJECT_CONNECTOR: {
  773. struct drm_connector *connector = obj_to_connector(obj);
  774. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  775. ret = drm_atomic_connector_get_property(connector,
  776. connector->state, property, val);
  777. break;
  778. }
  779. case DRM_MODE_OBJECT_CRTC: {
  780. struct drm_crtc *crtc = obj_to_crtc(obj);
  781. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  782. ret = drm_atomic_crtc_get_property(crtc,
  783. crtc->state, property, val);
  784. break;
  785. }
  786. case DRM_MODE_OBJECT_PLANE: {
  787. struct drm_plane *plane = obj_to_plane(obj);
  788. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  789. ret = drm_atomic_plane_get_property(plane,
  790. plane->state, property, val);
  791. break;
  792. }
  793. default:
  794. ret = -EINVAL;
  795. break;
  796. }
  797. return ret;
  798. }
  799. /*
  800. * The big monster ioctl
  801. */
  802. static struct drm_pending_vblank_event *create_vblank_event(
  803. struct drm_crtc *crtc, uint64_t user_data)
  804. {
  805. struct drm_pending_vblank_event *e = NULL;
  806. e = kzalloc(sizeof *e, GFP_KERNEL);
  807. if (!e)
  808. return NULL;
  809. e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
  810. e->event.base.length = sizeof(e->event);
  811. e->event.vbl.crtc_id = crtc->base.id;
  812. e->event.vbl.user_data = user_data;
  813. return e;
  814. }
/*
 * Implement legacy DPMS on top of an atomic commit.
 *
 * @state:     preallocated atomic state (acquire ctx must be set up)
 * @connector: connector whose DPMS mode is changed
 * @mode:      requested DPMS mode; anything but ON is treated as OFF
 *
 * DPMS is folded into the CRTC's ->active: the CRTC stays active as long
 * as at least one connector driving it still has DPMS_ON.  On any failure
 * the connector's dpms field is rolled back to its previous value.
 */
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
				     struct drm_connector *connector,
				     int mode)
{
	struct drm_connector *tmp_connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	/* Remember the old mode so the out: path can undo on error. */
	int i, ret, old_mode = connector->dpms;
	bool active = false;

	/* connection_mutex protects connector->dpms and ->state->crtc. */
	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	/* Legacy standby/suspend modes are collapsed into OFF. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;
	connector->dpms = mode;

	/* No CRTC bound: nothing to commit, the new dpms value sticks. */
	crtc = connector->state->crtc;
	if (!crtc)
		goto out;

	/* Pull all connectors on this CRTC into the state for the scan below. */
	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	/* CRTC stays active iff any connector on it is still DPMS_ON. */
	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
		if (new_conn_state->crtc != crtc)
			continue;

		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
			active = true;
			break;
		}
	}

	crtc_state->active = active;
	ret = drm_atomic_commit(state);
out:
	/* Roll back the dpms bookkeeping if anything above failed. */
	if (ret != 0)
		connector->dpms = old_mode;
	return ret;
}
/*
 * Apply one (object, property, value) triple from userspace to the atomic
 * state being built.
 *
 * @state:      atomic state under construction
 * @file_priv:  DRM file, needed for framebuffer/object lookups
 * @obj:        connector, CRTC or plane the property belongs to
 * @prop:       property to set
 * @prop_value: raw 64-bit value from the ioctl
 *
 * The value is validated against the property type first; for reference
 * (object) properties this also takes a temporary reference that is
 * dropped again at the end.  The object's duplicated state is pulled into
 * @state (taking its lock, so this can return -EDEADLK for the caller's
 * backoff/retry loop) before the per-object set_property helper runs.
 */
int drm_atomic_set_property(struct drm_atomic_state *state,
			    struct drm_file *file_priv,
			    struct drm_mode_object *obj,
			    struct drm_property *prop,
			    uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	/* Type/range check; may take a ref on an object-valued property. */
	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, file_priv,
				prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, file_priv,
				prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	/* Drop the reference taken by drm_property_change_valid_get(). */
	drm_property_change_valid_put(prop, ref);
	return ret;
}
  914. /**
  915. * DOC: explicit fencing properties
  916. *
  917. * Explicit fencing allows userspace to control the buffer synchronization
  918. * between devices. A Fence or a group of fences are transferred to/from
  919. * userspace using Sync File fds and there are two DRM properties for that.
  920. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
  921. * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
  922. *
  923. * As a contrast, with implicit fencing the kernel keeps track of any
  924. * ongoing rendering, and automatically ensures that the atomic update waits
  925. * for any pending rendering to complete. This is usually tracked in &struct
  926. * dma_resv which can also contain mandatory kernel fences. Implicit syncing
  927. * is how Linux traditionally worked (e.g. DRI2/3 on X.org), whereas explicit
  928. * fencing is what Android wants.
  929. *
 * "IN_FENCE_FD":
  931. * Use this property to pass a fence that DRM should wait on before
  932. * proceeding with the Atomic Commit request and show the framebuffer for
  933. * the plane on the screen. The fence can be either a normal fence or a
  934. * merged one, the sync_file framework will handle both cases and use a
  935. * fence_array if a merged fence is received. Passing -1 here means no
  936. * fences to wait on.
  937. *
  938. * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
  939. * it will only check if the Sync File is a valid one.
  940. *
  941. * On the driver side the fence is stored on the @fence parameter of
  942. * &struct drm_plane_state. Drivers which also support implicit fencing
  943. * should extract the implicit fence using drm_gem_plane_helper_prepare_fb(),
  944. * to make sure there's consistent behaviour between drivers in precedence
  945. * of implicit vs. explicit fencing.
  946. *
 * "OUT_FENCE_PTR":
  948. * Use this property to pass a file descriptor pointer to DRM. Once the
  949. * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
  950. * the file descriptor number of a Sync File. This Sync File contains the
  951. * CRTC fence that will be signaled when all framebuffers present on the
 * Atomic Commit request for that given CRTC are scanned out on the
  953. * screen.
  954. *
 * The Atomic Commit request fails if an invalid pointer is passed. If the
  956. * Atomic Commit request fails for any other reason the out fence fd
  957. * returned will be -1. On a Atomic Commit with the
  958. * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
  959. *
  960. * Note that out-fences don't have a special interface to drivers and are
  961. * internally represented by a &struct drm_pending_vblank_event in struct
  962. * &drm_crtc_state, which is also used by the nonblocking atomic commit
  963. * helpers and for the DRM event handling for existing userspace.
  964. */
/*
 * Book-keeping for one pending OUT_FENCE_PTR entry while an atomic commit
 * is processed.  Filled in by prepare_signaling()/setup_out_fence(); on
 * commit success complete_signaling() installs @fd, on failure it rolls
 * everything back.
 */
struct drm_out_fence_state {
	s32 __user *out_fence_ptr;	/* userspace slot the fd is written to */
	struct sync_file *sync_file;	/* sync_file wrapping the out-fence */
	int fd;				/* reserved fd, installed only on success */
};
/*
 * Reserve an fd, publish its number to userspace and wrap @fence in a
 * sync_file.
 *
 * The fd is only *reserved* here; it is not yet tied to the sync_file's
 * file.  That final fd_install() happens in complete_signaling() once the
 * commit succeeded, so a failed commit can still take the fd back with
 * put_unused_fd().  On any error return, complete_signaling() is relied
 * upon to undo the partial setup recorded in @fence_state.
 */
static int setup_out_fence(struct drm_out_fence_state *fence_state,
			   struct dma_fence *fence)
{
	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
	if (fence_state->fd < 0)
		return fence_state->fd;

	/* Tell userspace the fd number before the commit runs. */
	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
		return -EFAULT;

	fence_state->sync_file = sync_file_create(fence);
	if (!fence_state->sync_file)
		return -ENOMEM;

	return 0;
}
/*
 * Set up all completion signaling for an atomic commit: page-flip events
 * on CRTCs and out-fences on CRTCs and writeback connectors.
 *
 * @fence_state/@num_fences: grown (via krealloc) as fences are set up;
 * ownership stays with the caller, who must always pass the result to
 * complete_signaling() — on the error paths below this function returns
 * with partially initialized state that complete_signaling() unwinds.
 *
 * Test-only commits signal nothing and return early.
 */
static int prepare_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, c = 0, ret;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		/*
		 * An event is needed both for an explicit page-flip event
		 * and to back an out-fence (the fence hangs off the event).
		 */
		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(crtc, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			/* In-kernel callers have no file to deliver the event to. */
			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			/* Grow the caller's fence_state array by one slot. */
			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			/* The event created above carries the out-fence. */
			crtc_state->event->base.fence = fence;
		}

		c++;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		struct drm_writeback_connector *wb_conn;
		struct drm_out_fence_state *f;
		struct dma_fence *fence;
		s32 __user *fence_ptr;

		/* Only writeback connectors with a queued job can signal. */
		if (!conn_state->writeback_job)
			continue;

		fence_ptr = get_out_fence_for_connector(state, conn);
		if (!fence_ptr)
			continue;

		f = krealloc(*fence_state, sizeof(**fence_state) *
			     (*num_fences + 1), GFP_KERNEL);
		if (!f)
			return -ENOMEM;

		memset(&f[*num_fences], 0, sizeof(*f));

		f[*num_fences].out_fence_ptr = fence_ptr;
		*fence_state = f;

		wb_conn = drm_connector_to_writeback(conn);
		fence = drm_writeback_get_out_fence(wb_conn);
		if (!fence)
			return -ENOMEM;

		ret = setup_out_fence(&f[(*num_fences)++], fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}

		conn_state->writeback_job->out_fence = fence;
	}

	/*
	 * Having this flag means user mode pends on event which will never
	 * reach due to lack of at least one CRTC for signaling
	 */
	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	return 0;
}
/*
 * Finish or roll back the signaling set up by prepare_signaling().
 *
 * @install_fds == true (commit succeeded): install every reserved fd onto
 * its sync_file and free the tracking array — userspace already knows the
 * fd numbers from setup_out_fence().
 *
 * @install_fds == false (commit failed): cancel the events this ioctl
 * allocated, release the sync_files, return the reserved fds, and write
 * -1 through each user pointer so userspace sees no valid fence fd.
 */
static void complete_signaling(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       struct drm_out_fence_state *fence_state,
			       unsigned int num_fences,
			       bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_pending_vblank_event *event = crtc_state->event;
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		if (event && (event->base.fence || event->base.file_priv)) {
			drm_event_cancel_free(dev, &event->base);
			crtc_state->event = NULL;
		}
	}

	if (!fence_state)
		return;

	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails log error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			drm_dbg_atomic(dev, "Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}
/*
 * DRM_IOCTL_MODE_ATOMIC implementation: validate flags, decode the
 * userspace (object, property, value) arrays into an atomic state, set up
 * completion signaling, then check/commit.  Lock contention (-EDEADLK)
 * clears the state and retries via drm_modeset_backoff().
 */
int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	/* Userspace passes the four arrays as u64-encoded pointers. */
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_out_fence_state *fence_state;
	int ret = 0;
	unsigned int i, j, num_fences;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EOPNOTSUPP;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic) {
		drm_dbg_atomic(dev,
			       "commit failed: atomic cap not enabled\n");
		return -EINVAL;
	}

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) {
		drm_dbg_atomic(dev, "commit failed: invalid flag\n");
		return -EINVAL;
	}

	if (arg->reserved) {
		drm_dbg_atomic(dev, "commit failed: reserved field set\n");
		return -EINVAL;
	}

	/* Async page flips are not supported through the atomic ioctl here. */
	if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
		drm_dbg_atomic(dev,
			       "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
		return -EINVAL;
	}

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) {
		drm_dbg_atomic(dev,
			       "commit failed: page-flip event requested with test-only commit\n");
		return -EINVAL;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	/* Reset per-attempt progress; a backoff retry decodes from scratch. */
	copied_objs = 0;
	copied_props = 0;
	fence_state = NULL;
	num_fences = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			/* The property must actually belong to this object. */
			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = drm_atomic_set_property(state, file_priv,
						      obj, prop, prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		drm_mode_object_put(obj);
	}

	ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
				&num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		ret = drm_atomic_commit(state);
	}

out:
	/* On success install out-fence fds; on failure unwind signaling. */
	complete_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}