binder_alloc.c

// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
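
/*
 * alloc->buffers holds every buffer in ascending address order; these
 * helpers return the list neighbours of @buffer.
 */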
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
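
/*
 * A buffer's size is implicit: it extends from its own start address to the
 * start of the next buffer in the list, or to the end of the mapped area for
 * the last buffer.
 */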
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
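
/* Insert @new_buffer into the rb-tree of free buffers, ordered by size. */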
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
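
/*
 * Insert @new_buffer into the rb-tree of in-use buffers, ordered by user
 * address. Must be called with alloc->mutex held.
 */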
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void __user *uptr;

	uptr = (void __user *)user_ptr;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (uptr < buffer->user_data)
			n = n->rb_left;
		else if (uptr > buffer->user_data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to the buffer, NULL if no buffer matches @user_ptr, or
 * ERR_PTR(-EPERM) if the matching buffer may not be freed by the caller.
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
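
/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing the user address range [@start, @end) of this proc's buffer area.
 * Pages being freed are not returned to the system immediately; they are
 * placed on the global binder LRU so the shrinker can reclaim them under
 * memory pressure.
 */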
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void __user *start, void __user *end)
{
	void __user *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->mm))
		mm = alloc->mm;

	if (mm) {
		mmap_write_lock(mm);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		user_page_addr = (uintptr_t)page_addr;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
	}
	if (mm) {
		mmap_write_unlock(mm);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		if (page_addr == start)
			break;
		continue;

err_vm_insert_page_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		if (page_addr == start)
			break;
	}
err_no_vma:
	if (mm) {
		mmap_write_unlock(mm);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_vma() */
	return smp_load_acquire(&alloc->vma);
}
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
	/*
	 * Find the amount and size of buffers allocated by the current caller;
	 * The idea is that once we cross the threshold, whoever is responsible
	 * for the low async space is likely to try to send another async txn,
	 * and at some point we'll catch them in the act. This is more efficient
	 * than keeping a map per pid.
	 */
	struct rb_node *n;
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	size_t num_buffers = 0;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
			+ sizeof(struct binder_buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
				   alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}
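
/*
 * Carve a buffer of the requested size out of the best-fitting free buffer
 * and allocate any missing backing pages. Must be called with alloc->mutex
 * held; see binder_alloc_new_buf() for the documented interface.
 */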
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async,
				int pid)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void __user *has_page_addr;
	void __user *end_page_addr;
	size_t size, data_offsets_size;
	int ret;
	bool should_fail = false;

	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async);
	trace_android_vh_binder_detect_low_async_space(is_async, &alloc->free_async_space, pid,
						       &should_fail);
	if (should_fail) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, not allowed to alloc more async space\n",
				   alloc->pid, size);
		return ERR_PTR(-EPERM);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr = (void __user *)
		(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1, (void __user *)
		PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = (u8 __user *)buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = pid;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
		if (alloc->free_async_space < alloc->buffer_size / 10) {
			/*
			 * Start detecting spammers once we have less than 20%
			 * of async space left (which is less than 10% of total
			 * buffer size).
			 */
			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
		} else {
			alloc->oneway_spam_detected = false;
		}
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0, (void __user *)
				 PAGE_ALIGN((uintptr_t)buffer->user_data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc: binder_alloc for this proc
 * @data_size: size of user data buffer
 * @offsets_size: user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async: buffer for async transaction
 * @pid: pid to attribute allocation to (used for debugging)
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async,
					   int pid)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async, pid);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
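
/*
 * Helpers for the merge logic below: the page containing the first byte of
 * @buffer, and the page containing the byte just before @buffer (i.e. the
 * last byte of the preceding buffer).
 */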
static void __user *buffer_start_page(struct binder_buffer *buffer)
{
	return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}

static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void __user *)
		(((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
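
/*
 * Remove a free buffer that is being merged with a neighbour: unlink it from
 * the buffer list and free its descriptor. The page containing its start
 * address is released only when it is not shared with the previous or next
 * buffer and the start is not already page aligned.
 */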
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
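
/*
 * Return @buffer to the free rb-tree, releasing its backing pages and merging
 * it with adjacent free buffers. Must be called with alloc->mutex held.
 */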
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
				 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
				 (void __user *)(((uintptr_t)
					buffer->user_data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_alloc_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 * 0 = success
 * -EBUSY = address space already mapped
 * -EINVAL = vma belongs to a different mm than this proc
 * -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = (void __user *)vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_vma(alloc, vma);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = NULL;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
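
/**
 * binder_alloc_deferred_release() - free all buffers and pages for a proc
 * @alloc: binder_alloc for this proc
 *
 * Frees every remaining buffer, releases all backing pages (pulling them off
 * the binder LRU) and drops the reference on the mm. The vma must already
 * have been closed.
 */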
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void __user *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item: item to free
 * @lru: list_lru instance the item belongs to
 * @lock: lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	vma = binder_alloc_get_vma(alloc);

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	mmap_read_unlock(mm);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
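
/* Shrinker callbacks: count and reclaim pages sitting on the binder LRU. */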
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_alloc_lru);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}
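
/* Set up and tear down the global binder page LRU and its shrinker. */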
int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker, "android-binder");
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

void binder_alloc_shrinker_exit(void)
{
	unregister_shrinker(&binder_shrinker);
	list_lru_destroy(&binder_alloc_lru);
}
/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *    (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *    (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}
/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}
/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}
/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}
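
/*
 * Copy @bytes between the kernel buffer @ptr and the binder buffer at
 * @buffer_offset, one page at a time, in the direction selected by
 * @to_buffer. Returns 0 on success or -EINVAL if the range does not pass
 * check_buffer().
 */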
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}
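
/* Directional wrappers around binder_alloc_do_buffer_copy(). */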
int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}