drm_managed.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel
 *
 * Based on drivers/base/devres.c
 */

#include <drm/drm_managed.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <drm/drm_device.h>
#include <drm/drm_print.h>

#include "drm_internal.h"

/**
 * DOC: managed resources
 *
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device, usually
 * when userspace has some open files and other handles to resources still open.
 *
 * Release actions can be added with drmm_add_action(), memory allocations can
 * be done directly with drmm_kmalloc() and the related functions. Everything
 * will be released on the final drm_dev_put() in reverse order of how the
 * release actions have been added and memory has been allocated since driver
 * loading started with devm_drm_dev_alloc().
 *
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver, all the functions are fully concurrent
 * safe. But it is recommended to use managed resources only for resources that
 * change rarely, if ever, during the lifetime of the &drm_device instance.
 */
struct drmres_node {
        struct list_head        entry;
        drmres_release_t        release;
        const char              *name;
        size_t                  size;
};

struct drmres {
        struct drmres_node node;
        /*
         * Some archs want to perform DMA into kmalloc caches
         * and need a guaranteed alignment larger than
         * the alignment of a 64-bit integer.
         * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
         * buffer alignment as if it was allocated by plain kmalloc().
         */
        u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};
static void free_dr(struct drmres *dr)
{
        kfree_const(dr->node.name);
        kfree(dr);
}

void drm_managed_release(struct drm_device *dev)
{
        struct drmres *dr, *tmp;

        drm_dbg_drmres(dev, "drmres release begin\n");
        list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
                drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
                               dr, dr->node.name, dr->node.size);

                if (dr->node.release)
                        dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);

                list_del(&dr->node.entry);
                free_dr(dr);
        }
        drm_dbg_drmres(dev, "drmres release end\n");
}
/*
 * Always inline so that kmalloc_track_caller tracks the actual interesting
 * caller outside of drm_managed.c.
 */
static __always_inline struct drmres * alloc_dr(drmres_release_t release,
                                                size_t size, gfp_t gfp, int nid)
{
        size_t tot_size;
        struct drmres *dr;

        /* We must catch any near-SIZE_MAX cases that could overflow. */
        if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
                return NULL;

        dr = kmalloc_node_track_caller(tot_size, gfp, nid);
        if (unlikely(!dr))
                return NULL;

        memset(dr, 0, offsetof(struct drmres, data));

        INIT_LIST_HEAD(&dr->node.entry);
        dr->node.release = release;
        dr->node.size = size;

        return dr;
}

static void del_dr(struct drm_device *dev, struct drmres *dr)
{
        list_del_init(&dr->node.entry);

        drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
                       dr, dr->node.name, (unsigned long) dr->node.size);
}
static void add_dr(struct drm_device *dev, struct drmres *dr)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->managed.lock, flags);
        list_add(&dr->node.entry, &dev->managed.resources);
        spin_unlock_irqrestore(&dev->managed.lock, flags);

        drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
                       dr, dr->node.name, (unsigned long) dr->node.size);
}
/*
 * @container is the allocation that embeds @dev; it is kfree()'d last, in the
 * final drm_dev_put(), after all other release actions have run. The WARN_ON()
 * checks assert that @dev actually lies within @container.
 */
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
        WARN_ON(dev->managed.final_kfree);
        WARN_ON(dev < (struct drm_device *) container);
        WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
        dev->managed.final_kfree = container;
}
int __drmm_add_action(struct drm_device *dev,
                      drmres_release_t action,
                      void *data, const char *name)
{
        struct drmres *dr;
        void **void_ptr;

        dr = alloc_dr(action, data ? sizeof(void*) : 0,
                      GFP_KERNEL | __GFP_ZERO,
                      dev_to_node(dev->dev));
        if (!dr) {
                drm_dbg_drmres(dev, "failed to add action %s for %p\n",
                               name, data);
                return -ENOMEM;
        }

        dr->node.name = kstrdup_const(name, GFP_KERNEL);
        if (data) {
                void_ptr = (void **)&dr->data;
                *void_ptr = data;
        }

        add_dr(dev, dr);

        return 0;
}
EXPORT_SYMBOL(__drmm_add_action);

int __drmm_add_action_or_reset(struct drm_device *dev,
                               drmres_release_t action,
                               void *data, const char *name)
{
        int ret;

        ret = __drmm_add_action(dev, action, data, name);
        if (ret)
                action(dev, data);

        return ret;
}
EXPORT_SYMBOL(__drmm_add_action_or_reset);
/**
 * drmm_kmalloc - &drm_device managed kmalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
        struct drmres *dr;

        dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
        if (!dr) {
                drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
                               size, gfp);
                return NULL;
        }
        dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);

        add_dr(dev, dr);

        return dr->data;
}
EXPORT_SYMBOL(drmm_kmalloc);
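/*
 * Usage sketch (illustrative, not part of the upstream file): allocating
 * zeroed per-device state whose lifetime is tied to the drm_device. The names
 * my_table and my_init_tables are hypothetical; drmm_kzalloc() is the zeroing
 * wrapper around drmm_kmalloc() from <drm/drm_managed.h>.
 *
 *	struct my_table {
 *		unsigned int count;
 *		u32 entries[64];
 *	};
 *
 *	static int my_init_tables(struct drm_device *drm)
 *	{
 *		struct my_table *t;
 *
 *		t = drmm_kzalloc(drm, sizeof(*t), GFP_KERNEL);
 *		if (!t)
 *			return -ENOMEM;
 *
 *		// no explicit kfree() needed: the allocation is released
 *		// automatically on the final drm_dev_put()
 *		return 0;
 *	}
 */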
/**
 * drmm_kstrdup - &drm_device managed kstrdup()
 * @dev: DRM device
 * @s: 0-terminated string to be duplicated
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kstrdup(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
{
        size_t size;
        char *buf;

        if (!s)
                return NULL;

        size = strlen(s) + 1;
        buf = drmm_kmalloc(dev, size, gfp);
        if (buf)
                memcpy(buf, s, size);

        return buf;
}
EXPORT_SYMBOL_GPL(drmm_kstrdup);
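/*
 * Usage sketch (illustrative, not part of the upstream file): duplicating a
 * string so that the copy stays valid for the lifetime of the drm_device.
 * my_dev and fw_name are hypothetical.
 *
 *	const char *name = drmm_kstrdup(&my_dev->drm, fw_name, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	// name remains valid until the final drm_dev_put(), or until an
 *	// explicit drmm_kfree(&my_dev->drm, (void *)name)
 */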
/**
 * drmm_kfree - &drm_device managed kfree()
 * @dev: DRM device
 * @data: memory allocation to be freed
 *
 * This is a &drm_device managed version of kfree() which can be used to
 * release memory allocated through drmm_kmalloc() or any of its related
 * functions before the final drm_dev_put() of @dev.
 */
void drmm_kfree(struct drm_device *dev, void *data)
{
        struct drmres *dr_match = NULL, *dr;
        unsigned long flags;

        if (!data)
                return;

        spin_lock_irqsave(&dev->managed.lock, flags);
        list_for_each_entry(dr, &dev->managed.resources, node.entry) {
                if (dr->data == data) {
                        dr_match = dr;
                        del_dr(dev, dr_match);
                        break;
                }
        }
        spin_unlock_irqrestore(&dev->managed.lock, flags);

        if (WARN_ON(!dr_match))
                return;

        free_dr(dr_match);
}
EXPORT_SYMBOL(drmm_kfree);
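/*
 * Usage sketch (illustrative, not part of the upstream file): releasing a
 * managed allocation before the final drm_dev_put(), for example when a
 * feature is torn down early. my_dev and buf are hypothetical.
 *
 *	void *buf = drmm_kmalloc(&my_dev->drm, SZ_1K, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	// later, once buf is no longer needed:
 *	drmm_kfree(&my_dev->drm, buf);	// drops the tracking entry and frees now
 */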
/*
 * Release action backing the drmm_mutex_init() helper in <drm/drm_managed.h>:
 * destroys the mutex when the drm_device is released.
 */
void __drmm_mutex_release(struct drm_device *dev, void *res)
{
        struct mutex *lock = res;

        mutex_destroy(lock);
}
EXPORT_SYMBOL(__drmm_mutex_release);