// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
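
/*
 * Editor's note: a minimal userspace sketch of driving this ioctl, assuming
 * the uapi definitions from <xen/privcmd.h> and the Xen interface headers
 * (the chosen hypercall is just an illustrative example):
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR | O_CLOEXEC);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 *
 * As the handler above shows, this fails with -EPERM once the fd has been
 * restricted via IOCTL_PRIVCMD_RESTRICT.
 */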

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
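
/*
 * Editor's note: gather_array() packs elements so that none straddles a
 * page boundary (the "pageidx > PAGE_SIZE-size" check). For example, with
 * 4 KiB pages, a 24-byte struct privcmd_mmap_entry packs 170 entries per
 * page (170 * 24 = 4080), leaving 16 bytes of slack at the end of each
 * page.
 */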

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;

		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);

		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;

		nelem -= nr;
	}

	return ret;
}
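
/*
 * Editor's note: the difference from traverse_pages() is the callback
 * granularity. traverse_pages() invokes fn once per element, while
 * traverse_pages_block() hands fn up to PAGE_SIZE/size elements at a
 * time, e.g. up to 512 xen_pfn_t entries (8 bytes each) per 4 KiB page.
 */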

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}
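
/*
 * Editor's note: a hedged userspace sketch of the legacy MMAP ioctl,
 * assuming the uapi structures from <xen/privcmd.h>; "domid", "mfn" and
 * "npages" are hypothetical values. The VA range must come from mmap()ing
 * the privcmd fd itself, so the VMA carries privcmd_vm_ops and starts at
 * the entry's va:
 *
 *	void *va = mmap(NULL, npages << PAGE_SHIFT, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	struct privcmd_mmap_entry ent = {
 *		.va = (__u64)va, .mfn = mfn, .npages = npages,
 *	};
 *	struct privcmd_mmap cmd = { .num = 1, .dom = domid, .entry = &ent };
 *	ioctl(fd, IOCTL_PRIVCMD_MMAP, &cmd);
 */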

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}
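
/*
 * Editor's note: the two step sizes above differ on purpose. Xen frames
 * are always XEN_PAGE_SIZE (4 KiB), so st->va advances in Xen-page units,
 * while st->index counts kernel pages. With 4 KiB kernel pages
 * XEN_PFN_PER_PAGE == 1; a 64 KiB kernel page (e.g. on arm64) would hold
 * XEN_PFN_PER_PAGE == 16 Xen frames.
 */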

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
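
/*
 * Editor's note: a sketch of how a V1 caller decodes those in-band error
 * bits, assuming the uapi masks from <xen/privcmd.h>
 * (PRIVCMD_MMAPBATCH_MFN_ERROR covers the whole top nibble,
 * PRIVCMD_MMAPBATCH_PAGED_ERROR just the top bit):
 *
 *	xen_pfn_t gfn = arr[i];
 *	if ((gfn & PRIVCMD_MMAPBATCH_MFN_ERROR) ==
 *	    PRIVCMD_MMAPBATCH_PAGED_ERROR)
 *		;	// frame was paged out; retry this frame
 *	else if (gfn & PRIVCMD_MMAPBATCH_MFN_ERROR)
 *		;	// mapping failed outright
 */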

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors,
					   &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}
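
/*
 * Editor's note: since -ENOENT means "some foreign frames were paged
 * out", a V2 caller typically retries the whole batch until it succeeds.
 * A minimal sketch, assuming the uapi struct privcmd_mmapbatch_v2 and a
 * VA range previously obtained by mmap()ing the privcmd fd:
 *
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = n, .dom = domid, .addr = (__u64)va,
 *		.arr = gfns, .err = errs,
 *	};
 *	int rc;
 *	do {
 *		rc = ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *	} while (rc < 0 && errno == ENOENT);
 */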

static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}
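
/*
 * Editor's note on the loop above: "off" lets one buffer be pinned across
 * several pin_user_pages_fast() calls. If a 3-page buffer pins only 2
 * pages on the first attempt, off becomes 2 and the next iteration
 * requests the remaining page starting at uptr + 2 * PAGE_SIZE; once a
 * call pins everything it asked for, off resets to 0 and "i += !off"
 * advances to the next buffer.
 */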

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
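
/*
 * Editor's note: a hedged userspace sketch of IOCTL_PRIVCMD_DM_OP,
 * assuming the uapi structures from <xen/privcmd.h>. The dm_op payload (a
 * struct xen_dm_op whose op code is left as a placeholder here) is opaque
 * to this driver and forwarded to Xen unmodified:
 *
 *	struct xen_dm_op op = { .op = ... };
 *	struct privcmd_dm_op_buf buf = { .uptr = &op, .size = sizeof(op) };
 *	struct privcmd_dm_op cmd = { .dom = domid, .num = 1, .ubufs = &buf };
 *	ioctl(fd, IOCTL_PRIVCMD_DM_OP, &cmd);
 */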

static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
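
/*
 * Editor's note: restriction is one-way for the lifetime of the open file
 * description; after e.g.
 *
 *	domid_t domid = 5;	// hypothetical target domain
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 *
 * every later operation on this fd must target that domid, and raw
 * hypercalls via IOCTL_PRIVCMD_HYPERCALL fail with -EPERM.
 */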

static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;
		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr,
					 kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			  VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		misc_deregister(&privcmd_dev);
		return err;
	}

	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);