virtio_uml.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *	virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *	<socket>	:= vhost-user socket path to connect
 *	<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *	<platform_id>	:= (optional) platform device id
 *
 * example:
 *	virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
	int recv_rc;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */
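
/*
 * Send the whole buffer, retrying short writes; the file descriptors
 * are passed as ancillary data with the first sendmsg() only and
 * cleared once any part of the message has gone out.
 */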
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}
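
/*
 * Read exactly @len bytes. With @abortable, -EAGAIN from the
 * non-blocking fd is returned to the caller rather than retried.
 */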
static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);
	if (rc)
		return rc;

	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
				   int rc)
{
	struct virtio_uml_platform_data *pdata = vu_dev->pdata;

	if (rc != -ECONNRESET)
		return;

	if (!vu_dev->registered)
		return;

	vu_dev->registered = 0;

	schedule_work(&pdata->conn_broken_wk);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc) {
		vhost_user_check_reset(vu_dev, rc);
		return rc;
	}

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}
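
/*
 * All driver-to-device messages share one socket; sock_lock serializes
 * each request together with the optional REPLY_ACK read that follows.
 */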
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}

static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
				      bool need_response, u32 request)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
}

static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
					 u32 request, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
	};

	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
			       u32 request, u64 value)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = value,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}

static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
				   u64 *features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
					    VHOST_USER_GET_FEATURES);

	if (rc)
		return rc;

	return vhost_user_recv_u64(vu_dev, features);
}

static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 *protocol_features)
{
	int rc = vhost_user_send_no_payload(vu_dev, true,
			VHOST_USER_GET_PROTOCOL_FEATURES);

	if (rc)
		return rc;

	return vhost_user_recv_u64(vu_dev, protocol_features);
}

static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}
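
/*
 * Answer a device-to-driver ("slave") request on the request fd,
 * echoing the original header back with the REPLY flag set.
 */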
static void vhost_user_reply(struct virtio_uml_device *vu_dev,
			     struct vhost_user_msg *msg, int response)
{
	struct vhost_user_msg reply = {
		.payload.integer = response,
	};
	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
	int rc;

	reply.header = msg->header;
	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
	reply.header.flags |= VHOST_USER_FLAG_REPLY;
	reply.header.size = sizeof(reply.payload.integer);

	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
	if (rc)
		vu_err(vu_dev,
		       "sending reply to slave request failed: %d (size %zu)\n",
		       rc, size);
}
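
/*
 * Drain all pending slave requests from the non-blocking request fd.
 * Vring-call and config-change events are only latched here (in
 * vq_irq_vq_map/config_changed_irq); vu_req_interrupt() dispatches them.
 */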
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;
	irqreturn_t irq_rc = IRQ_NONE;

	while (1) {
		rc = vhost_user_recv_req(vu_dev, &msg.msg,
					 sizeof(msg.msg.payload) +
					 sizeof(msg.extra_payload));
		if (rc)
			break;

		switch (msg.msg.header.request) {
		case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
			vu_dev->config_changed_irq = true;
			response = 0;
			break;
		case VHOST_USER_SLAVE_VRING_CALL:
			virtio_device_for_each_vq((&vu_dev->vdev), vq) {
				if (vq->index == msg.msg.payload.vring_state.index) {
					response = 0;
					vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
					break;
				}
			}
			break;
		case VHOST_USER_SLAVE_IOTLB_MSG:
			/* not supported - VIRTIO_F_ACCESS_PLATFORM */
		case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
			/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
		default:
			vu_err(vu_dev, "unexpected slave request %d\n",
			       msg.msg.header.request);
		}

		if (ev && !vu_dev->suspended)
			time_travel_add_irq_event(ev);

		if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
			vhost_user_reply(vu_dev, &msg.msg, response);
		irq_rc = IRQ_HANDLED;
	}
	/* mask EAGAIN as we try non-blocking read until socket is empty */
	vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc;
	return irq_rc;
}

static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->recv_rc) {
		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
	} else if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}
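
/*
 * Basic vhost-user handshake: SET_OWNER, feature negotiation, then
 * (if negotiated) protocol features and the slave request channel.
 */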
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}

static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}

	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}
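
/*
 * phys_mapping() returns the fd backing the physical memory at @addr
 * and fills in the offset into that file; probing the last byte too
 * verifies the whole region is backed by the same mapping.
 */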
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;
	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);
	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
				&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}

/* Virtio interface */
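
/*
 * Kick the device: write to the per-queue eventfd if one was set up,
 * otherwise (in-band notifications) send a VHOST_USER_VRING_KICK
 * message over the shared socket.
 */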
static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);

	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}

static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}
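
/*
 * Per-queue setup: create the vring, allocate kick/call fds as
 * negotiated, then tell the device the ring size, base and the
 * descriptor/used/avail addresses.
 */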
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	vq->num_max = num;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}
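
/*
 * The memory table is sent before any ring is created; kick fds and
 * VRING_ENABLE follow only once all queues exist.
 */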
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}

static void vu_of_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc)
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	return rc;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}
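
/*
 * Parse "<socket>:<virtio_id>[:<platform_id>]" and register a matching
 * platform device; the optional third field overrides the otherwise
 * auto-incremented vu_cmdline_id.
 */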
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);

static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);
MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");