/*
 * linux/mm/rbinregion.c
 *
 * A physical memory allocator for rbincache.
 *
 * Manages physical pages on an LRU basis.
 * Although struct page->lru is used for managing the lists,
 * struct page is not exposed outside the region API.
 * Instead, a struct rr_handle is handed to the clients.
 */
#include <linux/vmalloc.h>

#include "rbinregion.h"

static struct rbin_region region;
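
/*
 * Illustrative sketch (not taken from the driver): a client such as
 * rbincache is expected to stash and retrieve pages purely through
 * rr_handles, roughly as below. src_page, dst_page and the index values
 * are hypothetical.
 *
 *      struct rr_handle *h;
 *      int err;
 *
 *      h = region_store_cache(src_page, pool_id, rb_index, ra_index);
 *      if (h) {
 *              // later, on a refault: 0 on success, -EINVAL if the entry
 *              // was evicted or reclaimed by dmabuf in the meantime
 *              err = region_load_cache(h, dst_page, pool_id,
 *                                      rb_index, ra_index);
 *      }
 *
 *      // alternatively, drop a stored entry without copying it back:
 *      //      region_flush_cache(h);
 */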

struct rr_handle *pfn_to_handle(unsigned long pfn)
{
        unsigned long idx = pfn - region.start_pfn;

        return &region.handles[idx];
}

struct page *handle_to_page(struct rr_handle *handle)
{
        unsigned long idx = (unsigned long)(handle - region.handles);

        return pfn_to_page(region.start_pfn + idx);
}

bool handle_is_valid(struct rr_handle *handle)
{
        return (handle >= region.handles) &&
               (handle < region.handles + region.nr_pages);
}
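
/*
 * The helpers below arbitrate between the two users of the region:
 * try_get_rbincache()/put_rbincache() and try_get_dmabuf_rbin()/
 * put_dmabuf_rbin() track in-flight operations under region_lock, and a
 * dmabuf allocation disables rbincache operations until
 * RC_AUTO_ENABLE_TIMEOUT has elapsed.
 */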
#define RC_AUTO_ENABLE_TIMEOUT  (5 * HZ)        /* 5 sec */

bool try_get_rbincache(void)
{
        bool ret = true;
        unsigned long flags;

        spin_lock_irqsave(&region.region_lock, flags);
        if (time_before(region.timeout, jiffies)) {
                if (region.rc_disabled)
                        wake_dmabuf_rbin_heap_shrink();
                region.rc_disabled = false;
        }
        if (region.rc_disabled || region.dmabuf_inflight)
                ret = false;
        else
                region.rc_inflight++;
        spin_unlock_irqrestore(&region.region_lock, flags);

        return ret;
}

void put_rbincache(void)
{
        unsigned long flags;

        spin_lock_irqsave(&region.region_lock, flags);
        region.rc_inflight--;
        spin_unlock_irqrestore(&region.region_lock, flags);
}

static bool try_get_dmabuf_rbin(void)
{
        bool ret = true;
        unsigned long flags;

        spin_lock_irqsave(&region.region_lock, flags);
        /* disable rbincache ops for a while */
        region.rc_disabled = true;
        region.timeout = jiffies + RC_AUTO_ENABLE_TIMEOUT;
        if (region.rc_inflight)
                ret = false;
        else
                region.dmabuf_inflight++;
        spin_unlock_irqrestore(&region.region_lock, flags);

        return ret;
}

static void put_dmabuf_rbin(void)
{
        unsigned long flags;

        spin_lock_irqsave(&region.region_lock, flags);
        region.dmabuf_inflight--;
        spin_unlock_irqrestore(&region.region_lock, flags);
}
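
/*
 * The eviction path below removes up to RC_EVICT_BATCH of the oldest
 * rbincache-owned entries from the usedlist, notifies the client through
 * region.ops->evict() and returns the slots to the freelist. Entries
 * currently owned by dmabuf are skipped and rotated to the list tail.
 */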
#define RC_EVICT_BATCH  1

static void region_mem_evict(void)
{
        struct rr_handle *handle, *next;
        int count = 0;
        unsigned long flags;

        if (!region.ops->evict)
                return;

        spin_lock_irqsave(&region.lru_lock, flags);
        list_for_each_entry_safe(handle, next, &region.usedlist, lru) {
                if (++count > RC_EVICT_BATCH)
                        break;
                /* move to the list tail and skip handles in use by dmabuf */
                if (handle->usage == DMABUF_INUSE) {
                        list_move_tail(&handle->lru, &region.usedlist);
                        continue;
                }
                if (handle->usage == RC_INUSE) {
                        atomic_inc(&rbin_free_pages);
                        atomic_dec(&rbin_cached_pages);
                }
                /* mark freed; load/flush ops will be denied */
                handle->usage = RC_FREED;
                list_del(&handle->lru);
                spin_unlock_irqrestore(&region.lru_lock, flags);
                region.ops->evict((unsigned long)handle);
                spin_lock_irqsave(&region.lru_lock, flags);
                list_add(&handle->lru, &region.freelist);
        }
        spin_unlock_irqrestore(&region.lru_lock, flags);
}

/* Add handle to [free|used]list */
static void add_to_list(struct rr_handle *handle, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(&region.lru_lock, flags);
        list_add_tail(&handle->lru, head);
        spin_unlock_irqrestore(&region.lru_lock, flags);
}

/*
 * Find a free slot in the region, detach the memory chunk from the list,
 * then return the corresponding handle.
 * If there is no free slot, evict one from the usedlist.
 */
static struct rr_handle *region_get_freemem(void)
{
        struct rr_handle *handle = NULL;
        unsigned long flags;

        spin_lock_irqsave(&region.lru_lock, flags);
        if (list_empty(&region.freelist)) {
                spin_unlock_irqrestore(&region.lru_lock, flags);
                region_mem_evict();
                spin_lock_irqsave(&region.lru_lock, flags);
        }
        handle = list_first_entry_or_null(&region.freelist, struct rr_handle, lru);
        if (!handle) {
                spin_unlock_irqrestore(&region.lru_lock, flags);
                goto out;
        }
        list_del(&handle->lru);
        spin_unlock_irqrestore(&region.lru_lock, flags);
        /*
         * Skip handles that are (or were) used by dmabuf; wait for them to
         * be evicted from the usedlist.
         */
        if (handle->usage == DMABUF_INUSE) {
                add_to_list(handle, &region.usedlist);
                return NULL;
        }
        if (handle->usage == DMABUF_FREED)
                region.ops->evict((unsigned long)handle);
        memset(handle, 0, sizeof(struct rr_handle));
out:
        return handle;
}
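
/*
 * Copy @src into a free slot in the region and tag the slot with
 * (@pool_id, @rb_index, @ra_index) so it can be looked up later.
 * Returns the handle on success, or NULL if rbincache is currently
 * disabled or no free slot could be obtained.
 */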
struct rr_handle *region_store_cache(struct page *src, int pool_id,
                                     int rb_index, int ra_index)
{
        struct rr_handle *handle;
        unsigned long flags;

        if (!try_get_rbincache())
                return NULL;

        handle = region_get_freemem();
        if (!handle)
                goto out;

        BUG_ON(!handle_is_valid(handle));
        copy_page(page_address(handle_to_page(handle)), page_address(src));
        spin_lock_irqsave(&region.lru_lock, flags);
        handle->pool_id = pool_id;
        handle->rb_index = rb_index;
        handle->ra_index = ra_index;
        handle->usage = RC_INUSE;
        spin_unlock_irqrestore(&region.lru_lock, flags);
        add_to_list(handle, &region.usedlist);
        atomic_dec(&rbin_free_pages);
        atomic_inc(&rbin_cached_pages);
out:
        put_rbincache();
        return handle;
}
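
/*
 * Copy the page referenced by @handle back into @dst if the entry is still
 * valid and its indices match, then return the slot to the freelist.
 * A NULL @dst drops the entry without copying (see region_flush_cache()).
 * Returns 0 on success, -EINVAL if the entry was freed, overwritten, or
 * rbincache operations are disabled.
 */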
int region_load_cache(struct rr_handle *handle, struct page *dst,
                      int pool_id, int rb_index, int ra_index)
{
        struct page *page;
        unsigned long flags;
        int ret = -EINVAL;

        if (!try_get_rbincache())
                return ret;

        BUG_ON(!handle_is_valid(handle));
        if (handle->usage != RC_INUSE)
                goto out;

        spin_lock_irqsave(&region.lru_lock, flags);
        /* skip if the handle is invalid (freed or overwritten) */
        if ((handle->usage != RC_INUSE) ||
            (dst && (handle->pool_id != pool_id ||
                     handle->rb_index != rb_index ||
                     handle->ra_index != ra_index))) {
                spin_unlock_irqrestore(&region.lru_lock, flags);
                goto out;
        }
        handle->usage = RC_FREED;
        list_del(&handle->lru);
        spin_unlock_irqrestore(&region.lru_lock, flags);

        if (dst) {
                page = handle_to_page(handle);
                copy_page(page_address(dst), page_address(page));
        }
        add_to_list(handle, &region.freelist);
        atomic_inc(&rbin_free_pages);
        atomic_dec(&rbin_cached_pages);
        ret = 0;
out:
        put_rbincache();
        return ret;
}

int region_flush_cache(struct rr_handle *handle)
{
        return region_load_cache(handle, NULL, 0, 0, 0);
}
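
/*
 * Set up the region covering @nr_pages pages starting at @pfn: allocate one
 * rr_handle per page, place every handle on the freelist and account all
 * pages as free.
 */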
void init_region(unsigned long pfn, unsigned long nr_pages,
                 const struct region_ops *ops)
{
        struct rr_handle *handle;
        unsigned long i;

        region.start_pfn = pfn;
        region.nr_pages = nr_pages;
        region.ops = ops;
        spin_lock_init(&region.lru_lock);
        spin_lock_init(&region.region_lock);
        region.handles = vzalloc(nr_pages * sizeof(struct rr_handle));
        INIT_LIST_HEAD(&region.freelist);
        INIT_LIST_HEAD(&region.usedlist);
        for (i = 0; i < nr_pages; i++) {
                handle = &region.handles[i];
                INIT_LIST_HEAD(&handle->lru);
                list_add(&handle->lru, &region.freelist);
        }
        atomic_set(&rbin_free_pages, nr_pages);
}
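
/*
 * isolate_region() and putback_region() hand a pfn range over to dmabuf and
 * take it back: the handles are marked DMABUF_INUSE (later DMABUF_FREED)
 * and the free/cached page counters are adjusted accordingly.
 */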
static void isolate_region(unsigned long start_pfn, unsigned long nr_pages)
{
        struct rr_handle *handle;
        unsigned long pfn;
        int nr_cached = 0;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                /*
                 * Mark pages used by dmabuf. Rbincache ops are blocked for
                 * now, so it is okay to do this without any lock. Later
                 * rbincache accesses to these pages will be denied.
                 */
                handle = pfn_to_handle(pfn);
                if (handle->usage == RC_INUSE)
                        nr_cached++;
                handle->usage = DMABUF_INUSE;
        }
        atomic_sub(nr_pages - nr_cached, &rbin_free_pages);
        atomic_sub(nr_cached, &rbin_cached_pages);
}

static void putback_region(unsigned long start_pfn, unsigned long nr_pages)
{
        struct rr_handle *handle;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                handle = pfn_to_handle(pfn);
                handle->usage = DMABUF_FREED;
        }
        atomic_add(nr_pages, &rbin_free_pages);
}
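
/*
 * Carve @size bytes of physically contiguous memory out of the region for a
 * dmabuf heap. Returns the physical address, -EBUSY if rbincache operations
 * are still in flight, or -ENOMEM if the gen_pool has no space left.
 */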
phys_addr_t dmabuf_rbin_allocate(unsigned long size)
{
        unsigned long paddr;

        if (!try_get_dmabuf_rbin())
                return -EBUSY;

        paddr = gen_pool_alloc(region.pool, size);
        if (!paddr) {
                paddr = -ENOMEM;
                goto out;
        }
        isolate_region(PFN_DOWN(paddr), size >> PAGE_SHIFT);
out:
        put_dmabuf_rbin();
        return paddr;
}

void dmabuf_rbin_free(phys_addr_t addr, unsigned long size)
{
        if (IS_ERR_VALUE(addr))
                return;

        putback_region(PFN_DOWN(addr), size >> PAGE_SHIFT);
        gen_pool_free(region.pool, addr, size);
}

int init_rbinregion(unsigned long base, unsigned long size)
{
        region.pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!region.pool) {
                pr_err("%s failed gen_pool_create\n", __func__);
                return -1;
        }
        gen_pool_add(region.pool, base, size, -1);
        if (init_rbincache(PFN_DOWN(base), size >> PAGE_SHIFT)) {
                gen_pool_destroy(region.pool);
                return -1;
        }
        return 0;
}