xenbus.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Xenbus code for netif backend
  4. *
  5. * Copyright (C) 2005 Rusty Russell <[email protected]>
  6. * Copyright (C) 2005 XenSource Ltd
  7. */
  8. #include "common.h"
  9. #include <linux/vmalloc.h>
  10. #include <linux/rtnetlink.h>
/* Forward declarations for the connect/disconnect state machine below. */
static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state);

#ifdef CONFIG_DEBUG_FS
/* Root of the xen-netback debugfs tree; one subdirectory is added per vif. */
struct dentry *xen_netback_dbg_root = NULL;
/*
 * debugfs "io_ring_qN" read handler.
 *
 * Dumps one queue's TX and RX shared-ring indices (the parenthesised %d
 * values are offsets relative to the shared rsp_prod, i.e. work in flight),
 * the pending/dealloc bookkeeping, and the queue's NAPI, device TX-queue
 * and credit-scheduler state.  Returns 0 per seq_file convention.
 */
static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
	struct xenvif_queue *queue = m->private;
	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
	struct netdev_queue *dev_queue;

	if (tx_ring->sring) {
		struct xen_netif_tx_sring *sring = tx_ring->sring;

		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
			   tx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   tx_ring->req_cons,
			   tx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
			   sring->rsp_prod,
			   tx_ring->rsp_prod_pvt,
			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
		seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
			   queue->pending_prod,
			   queue->pending_cons,
			   nr_pending_reqs(queue));
		seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
			   queue->dealloc_prod,
			   queue->dealloc_cons,
			   queue->dealloc_prod - queue->dealloc_cons);
	}

	if (rx_ring->sring) {
		struct xen_netif_rx_sring *sring = rx_ring->sring;

		seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
			   sring->req_prod,
			   sring->req_prod - sring->rsp_prod,
			   rx_ring->req_cons,
			   rx_ring->req_cons - sring->rsp_prod,
			   sring->req_event,
			   sring->req_event - sring->rsp_prod);
		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
			   sring->rsp_prod,
			   rx_ring->rsp_prod_pvt,
			   rx_ring->rsp_prod_pvt - sring->rsp_prod,
			   sring->rsp_event,
			   sring->rsp_event - sring->rsp_prod);
	}

	seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
		   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
		   "remaining: %lu, expires: %lu, now: %lu\n",
		   queue->napi.state, queue->napi.weight,
		   skb_queue_len(&queue->tx_queue),
		   timer_pending(&queue->credit_timeout),
		   queue->credit_bytes,
		   queue->credit_usec,
		   queue->remaining_credit,
		   queue->credit_timeout.expires,
		   jiffies);

	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);

	seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
		   queue->rx_queue_len, queue->rx_queue_max,
		   skb_queue_len(&queue->rx_queue),
		   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");

	return 0;
}
  89. #define XENVIF_KICK_STR "kick"
  90. #define BUFFER_SIZE 32
  91. static ssize_t
  92. xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
  93. loff_t *ppos)
  94. {
  95. struct xenvif_queue *queue =
  96. ((struct seq_file *)filp->private_data)->private;
  97. int len;
  98. char write[BUFFER_SIZE];
  99. /* don't allow partial writes and check the length */
  100. if (*ppos != 0)
  101. return 0;
  102. if (count >= sizeof(write))
  103. return -ENOSPC;
  104. len = simple_write_to_buffer(write,
  105. sizeof(write) - 1,
  106. ppos,
  107. buf,
  108. count);
  109. if (len < 0)
  110. return len;
  111. write[len] = '\0';
  112. if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
  113. xenvif_interrupt(0, (void *)queue);
  114. else {
  115. pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
  116. queue->id);
  117. count = -EINVAL;
  118. }
  119. return count;
  120. }
  121. static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
  122. {
  123. int ret;
  124. void *queue = NULL;
  125. if (inode->i_private)
  126. queue = inode->i_private;
  127. ret = single_open(filp, xenvif_read_io_ring, queue);
  128. filp->f_mode |= FMODE_PWRITE;
  129. return ret;
  130. }
/* File operations for the per-queue "io_ring_qN" debugfs entries. */
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
	.owner = THIS_MODULE,
	.open = xenvif_io_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = xenvif_write_io_ring,
};
/* debugfs "ctrl" read handler: dump the vif's hash configuration. */
static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
	struct xenvif *vif = m->private;

	xenvif_dump_hash_info(vif, m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
  146. static void xenvif_debugfs_addif(struct xenvif *vif)
  147. {
  148. int i;
  149. vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
  150. xen_netback_dbg_root);
  151. for (i = 0; i < vif->num_queues; ++i) {
  152. char filename[sizeof("io_ring_q") + 4];
  153. snprintf(filename, sizeof(filename), "io_ring_q%d", i);
  154. debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
  155. &vif->queues[i],
  156. &xenvif_dbg_io_ring_ops_fops);
  157. }
  158. if (vif->ctrl_irq)
  159. debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
  160. &xenvif_ctrl_fops);
  161. }
  162. static void xenvif_debugfs_delif(struct xenvif *vif)
  163. {
  164. debugfs_remove_recursive(vif->xenvif_dbg_root);
  165. vif->xenvif_dbg_root = NULL;
  166. }
  167. #endif /* CONFIG_DEBUG_FS */
  168. /*
  169. * Handle the creation of the hotplug script environment. We add the script
  170. * and vif variables to the environment, for the benefit of the vif-* hotplug
  171. * scripts.
  172. */
  173. static int netback_uevent(struct xenbus_device *xdev,
  174. struct kobj_uevent_env *env)
  175. {
  176. struct backend_info *be = dev_get_drvdata(&xdev->dev);
  177. if (!be)
  178. return 0;
  179. if (add_uevent_var(env, "script=%s", be->hotplug_script))
  180. return -ENOMEM;
  181. if (!be->vif)
  182. return 0;
  183. return add_uevent_var(env, "vif=%s", be->vif->dev->name);
  184. }
  185. static int backend_create_xenvif(struct backend_info *be)
  186. {
  187. int err;
  188. long handle;
  189. struct xenbus_device *dev = be->dev;
  190. struct xenvif *vif;
  191. if (be->vif != NULL)
  192. return 0;
  193. err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
  194. if (err != 1) {
  195. xenbus_dev_fatal(dev, err, "reading handle");
  196. return (err < 0) ? err : -EINVAL;
  197. }
  198. vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
  199. if (IS_ERR(vif)) {
  200. err = PTR_ERR(vif);
  201. xenbus_dev_fatal(dev, err, "creating interface");
  202. return err;
  203. }
  204. be->vif = vif;
  205. vif->be = be;
  206. kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
  207. return 0;
  208. }
/*
 * Tear down everything connect() set up for the vif: watches, debugfs,
 * the per-queue data path and finally the control ring.  Safe to call
 * when no vif exists.  Statement order here is load-bearing; see the
 * comments below.
 */
static void backend_disconnect(struct backend_info *be)
{
	struct xenvif *vif = be->vif;

	if (vif) {
		/* Sample the queue count before num_queues is zeroed so the
		 * deinit loop below still walks every queue.
		 */
		unsigned int num_queues = vif->num_queues;
		unsigned int queue_index;

		xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
		xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
		xenvif_disconnect_data(vif);

		/* At this point some of the handlers may still be active
		 * so we need to have additional synchronization here.
		 */
		vif->num_queues = 0;
		synchronize_net();

		for (queue_index = 0; queue_index < num_queues; ++queue_index)
			xenvif_deinit_queue(&vif->queues[queue_index]);

		vfree(vif->queues);
		vif->queues = NULL;

		xenvif_disconnect_ctrl(vif);
	}
}
  232. static void backend_connect(struct backend_info *be)
  233. {
  234. if (be->vif)
  235. connect(be);
  236. }
  237. static inline void backend_switch_state(struct backend_info *be,
  238. enum xenbus_state state)
  239. {
  240. struct xenbus_device *dev = be->dev;
  241. pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
  242. be->state = state;
  243. /* If we are waiting for a hotplug script then defer the
  244. * actual xenbus state change.
  245. */
  246. if (!be->have_hotplug_status_watch)
  247. xenbus_switch_state(dev, state);
  248. }
/* Handle backend state transitions:
 *
 * The backend state starts in Initialising and the following transitions are
 * allowed.
 *
 * Initialising -> InitWait -> Connected
 *          \
 *           \        ^    \         |
 *            \       |     \        |
 *             \      |      \       |
 *              \     |       \      |
 *               \    |        \     |
 *                \   |         \    |
 *                 V  |          V   V
 *
 *                  Closed  <-> Closing
 *
 * The state argument specifies the eventual state of the backend and the
 * function transitions to that state via the shortest path.
 */
static void set_backend_state(struct backend_info *be,
			      enum xenbus_state state)
{
	/* Each loop iteration moves exactly one hop along the shortest
	 * path toward the target state; BUG() on any hop the diagram
	 * above does not allow.
	 */
	while (be->state != state) {
		switch (be->state) {
		case XenbusStateInitialising:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				backend_switch_state(be, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateInitWait:
			switch (state) {
			case XenbusStateConnected:
				/* Connect the data path before advertising
				 * the Connected state.
				 */
				backend_connect(be);
				backend_switch_state(be, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				/* Leaving Connected always tears the data
				 * path down first.
				 */
				backend_disconnect(be);
				backend_switch_state(be, XenbusStateClosing);
				break;
			default:
				BUG();
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				backend_switch_state(be, XenbusStateClosed);
				break;
			default:
				BUG();
			}
			break;
		default:
			BUG();
		}
	}
}
  343. static void read_xenbus_frontend_xdp(struct backend_info *be,
  344. struct xenbus_device *dev)
  345. {
  346. struct xenvif *vif = be->vif;
  347. u16 headroom;
  348. int err;
  349. err = xenbus_scanf(XBT_NIL, dev->otherend,
  350. "xdp-headroom", "%hu", &headroom);
  351. if (err != 1) {
  352. vif->xdp_headroom = 0;
  353. return;
  354. }
  355. if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
  356. headroom = XEN_NETIF_MAX_XDP_HEADROOM;
  357. vif->xdp_headroom = headroom;
  358. }
/*
 * Callback received when the frontend's state changes.  Maps each
 * frontend state onto a target backend state and lets
 * set_backend_state() walk there.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));

	be->frontend_state = frontend_state;

	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(be, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		set_backend_state(be, XenbusStateConnected);
		break;

	case XenbusStateReconfiguring:
		/* Re-read the (possibly changed) XDP headroom and then
		 * acknowledge the reconfiguration.
		 */
		read_xenbus_frontend_xdp(be, dev);
		xenbus_switch_state(dev, XenbusStateReconfigured);
		break;

	case XenbusStateClosing:
		set_backend_state(be, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(be, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough; /* if not online */
	case XenbusStateUnknown:
		/* The device node was removed: drop the backend device. */
		set_backend_state(be, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
  399. static void xen_net_read_rate(struct xenbus_device *dev,
  400. unsigned long *bytes, unsigned long *usec)
  401. {
  402. char *s, *e;
  403. unsigned long b, u;
  404. char *ratestr;
  405. /* Default to unlimited bandwidth. */
  406. *bytes = ~0UL;
  407. *usec = 0;
  408. ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
  409. if (IS_ERR(ratestr))
  410. return;
  411. s = ratestr;
  412. b = simple_strtoul(s, &e, 10);
  413. if ((s == e) || (*e != ','))
  414. goto fail;
  415. s = e + 1;
  416. u = simple_strtoul(s, &e, 10);
  417. if ((s == e) || (*e != '\0'))
  418. goto fail;
  419. *bytes = b;
  420. *usec = u;
  421. kfree(ratestr);
  422. return;
  423. fail:
  424. pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
  425. kfree(ratestr);
  426. }
  427. static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
  428. {
  429. char *s, *e, *macstr;
  430. int i;
  431. macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
  432. if (IS_ERR(macstr))
  433. return PTR_ERR(macstr);
  434. for (i = 0; i < ETH_ALEN; i++) {
  435. mac[i] = simple_strtoul(s, &e, 16);
  436. if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
  437. kfree(macstr);
  438. return -ENOENT;
  439. }
  440. s = e+1;
  441. }
  442. kfree(macstr);
  443. return 0;
  444. }
  445. static void xen_net_rate_changed(struct xenbus_watch *watch,
  446. const char *path, const char *token)
  447. {
  448. struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
  449. struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
  450. unsigned long credit_bytes;
  451. unsigned long credit_usec;
  452. unsigned int queue_index;
  453. xen_net_read_rate(dev, &credit_bytes, &credit_usec);
  454. for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
  455. struct xenvif_queue *queue = &vif->queues[queue_index];
  456. queue->credit_bytes = credit_bytes;
  457. queue->credit_usec = credit_usec;
  458. if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
  459. queue->remaining_credit > queue->credit_bytes) {
  460. queue->remaining_credit = queue->credit_bytes;
  461. }
  462. }
  463. }
  464. static int xen_register_credit_watch(struct xenbus_device *dev,
  465. struct xenvif *vif)
  466. {
  467. int err = 0;
  468. char *node;
  469. unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
  470. if (vif->credit_watch.node)
  471. return -EADDRINUSE;
  472. node = kmalloc(maxlen, GFP_KERNEL);
  473. if (!node)
  474. return -ENOMEM;
  475. snprintf(node, maxlen, "%s/rate", dev->nodename);
  476. vif->credit_watch.node = node;
  477. vif->credit_watch.will_handle = NULL;
  478. vif->credit_watch.callback = xen_net_rate_changed;
  479. err = register_xenbus_watch(&vif->credit_watch);
  480. if (err) {
  481. pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
  482. kfree(node);
  483. vif->credit_watch.node = NULL;
  484. vif->credit_watch.will_handle = NULL;
  485. vif->credit_watch.callback = NULL;
  486. }
  487. return err;
  488. }
  489. static void xen_unregister_credit_watch(struct xenvif *vif)
  490. {
  491. if (vif->credit_watch.node) {
  492. unregister_xenbus_watch(&vif->credit_watch);
  493. kfree(vif->credit_watch.node);
  494. vif->credit_watch.node = NULL;
  495. }
  496. }
  497. static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
  498. const char *path, const char *token)
  499. {
  500. struct xenvif *vif = container_of(watch, struct xenvif,
  501. mcast_ctrl_watch);
  502. struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
  503. vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
  504. "request-multicast-control", 0);
  505. }
  506. static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
  507. struct xenvif *vif)
  508. {
  509. int err = 0;
  510. char *node;
  511. unsigned maxlen = strlen(dev->otherend) +
  512. sizeof("/request-multicast-control");
  513. if (vif->mcast_ctrl_watch.node) {
  514. pr_err_ratelimited("Watch is already registered\n");
  515. return -EADDRINUSE;
  516. }
  517. node = kmalloc(maxlen, GFP_KERNEL);
  518. if (!node) {
  519. pr_err("Failed to allocate memory for watch\n");
  520. return -ENOMEM;
  521. }
  522. snprintf(node, maxlen, "%s/request-multicast-control",
  523. dev->otherend);
  524. vif->mcast_ctrl_watch.node = node;
  525. vif->mcast_ctrl_watch.will_handle = NULL;
  526. vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
  527. err = register_xenbus_watch(&vif->mcast_ctrl_watch);
  528. if (err) {
  529. pr_err("Failed to set watcher %s\n",
  530. vif->mcast_ctrl_watch.node);
  531. kfree(node);
  532. vif->mcast_ctrl_watch.node = NULL;
  533. vif->mcast_ctrl_watch.will_handle = NULL;
  534. vif->mcast_ctrl_watch.callback = NULL;
  535. }
  536. return err;
  537. }
  538. static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
  539. {
  540. if (vif->mcast_ctrl_watch.node) {
  541. unregister_xenbus_watch(&vif->mcast_ctrl_watch);
  542. kfree(vif->mcast_ctrl_watch.node);
  543. vif->mcast_ctrl_watch.node = NULL;
  544. }
  545. }
/*
 * Arm both runtime watches for a vif: the "rate" credit limiter and the
 * "request-multicast-control" key.
 *
 * NOTE(review): both helpers return an error code that is discarded
 * here, so a failed registration is silent apart from the pr_err inside
 * the helpers — confirm this best-effort behaviour is intended.
 */
static void xen_register_watchers(struct xenbus_device *dev,
				  struct xenvif *vif)
{
	xen_register_credit_watch(dev, vif);
	xen_register_mcast_ctrl_watch(dev, vif);
}
/* Tear down the watches armed by xen_register_watchers(), in reverse order. */
static void xen_unregister_watchers(struct xenvif *vif)
{
	xen_unregister_mcast_ctrl_watch(vif);
	xen_unregister_credit_watch(vif);
}
  557. static void unregister_hotplug_status_watch(struct backend_info *be)
  558. {
  559. if (be->have_hotplug_status_watch) {
  560. unregister_xenbus_watch(&be->hotplug_status_watch);
  561. kfree(be->hotplug_status_watch.node);
  562. }
  563. be->have_hotplug_status_watch = 0;
  564. }
  565. static void hotplug_status_changed(struct xenbus_watch *watch,
  566. const char *path,
  567. const char *token)
  568. {
  569. struct backend_info *be = container_of(watch,
  570. struct backend_info,
  571. hotplug_status_watch);
  572. char *str;
  573. unsigned int len;
  574. str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
  575. if (IS_ERR(str))
  576. return;
  577. if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
  578. /* Complete any pending state change */
  579. xenbus_switch_state(be->dev, be->state);
  580. /* Not interested in this watch anymore. */
  581. unregister_hotplug_status_watch(be);
  582. }
  583. kfree(str);
  584. }
  585. static int connect_ctrl_ring(struct backend_info *be)
  586. {
  587. struct xenbus_device *dev = be->dev;
  588. struct xenvif *vif = be->vif;
  589. unsigned int val;
  590. grant_ref_t ring_ref;
  591. unsigned int evtchn;
  592. int err;
  593. err = xenbus_scanf(XBT_NIL, dev->otherend,
  594. "ctrl-ring-ref", "%u", &val);
  595. if (err < 0)
  596. goto done; /* The frontend does not have a control ring */
  597. ring_ref = val;
  598. err = xenbus_scanf(XBT_NIL, dev->otherend,
  599. "event-channel-ctrl", "%u", &val);
  600. if (err < 0) {
  601. xenbus_dev_fatal(dev, err,
  602. "reading %s/event-channel-ctrl",
  603. dev->otherend);
  604. goto fail;
  605. }
  606. evtchn = val;
  607. err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
  608. if (err) {
  609. xenbus_dev_fatal(dev, err,
  610. "mapping shared-frame %u port %u",
  611. ring_ref, evtchn);
  612. goto fail;
  613. }
  614. done:
  615. return 0;
  616. fail:
  617. return err;
  618. }
/*
 * Bring the interface up once the frontend has written its configuration:
 * validate the requested queue count, read MAC/rate/feature flags,
 * connect the (optional) control ring, then allocate and connect every
 * data queue.  On success the carrier is raised and a one-shot watch on
 * "hotplug-status" is armed to complete the xenbus state change.  All
 * failures are reported via xenbus_dev_fatal(); the err path below
 * unwinds whatever was set up.
 */
static void connect(struct backend_info *be)
{
	int err;
	struct xenbus_device *dev = be->dev;
	unsigned long credit_bytes, credit_usec;
	unsigned int queue_index;
	unsigned int requested_num_queues;
	struct xenvif_queue *queue;

	/* Check whether the frontend requested multiple queues
	 * and read the number requested.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues", 1);
	if (requested_num_queues > xenvif_max_queues) {
		/* buggy or malicious guest */
		xenbus_dev_fatal(dev, -EINVAL,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenvif_max_queues);
		return;
	}

	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		return;
	}

	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
	/* Re-arm the watches cleanly even on reconnect. */
	xen_unregister_watchers(be->vif);
	xen_register_watchers(dev, be->vif);
	read_xenbus_vif_flags(be);

	err = connect_ctrl_ring(be);
	if (err) {
		xenbus_dev_fatal(dev, err, "connecting control ring");
		return;
	}

	/* Use the number of queues requested by the frontend */
	be->vif->queues = vzalloc(array_size(requested_num_queues,
					     sizeof(struct xenvif_queue)));
	if (!be->vif->queues) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating queues");
		return;
	}

	be->vif->num_queues = requested_num_queues;
	be->vif->stalled_queues = requested_num_queues;

	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
		queue = &be->vif->queues[queue_index];
		queue->vif = be->vif;
		queue->id = queue_index;
		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
			 be->vif->dev->name, queue->id);

		err = xenvif_init_queue(queue);
		if (err) {
			/* xenvif_init_queue() cleans up after itself on
			 * failure, but we need to clean up any previously
			 * initialised queues. Set num_queues to i so that
			 * earlier queues can be destroyed using the regular
			 * disconnect logic.
			 */
			be->vif->num_queues = queue_index;
			goto err;
		}

		queue->credit_bytes = credit_bytes;
		queue->remaining_credit = credit_bytes;
		queue->credit_usec = credit_usec;

		err = connect_data_rings(be, queue);
		if (err) {
			/* connect_data_rings() cleans up after itself on
			 * failure, but we need to clean up after
			 * xenvif_init_queue() here, and also clean up any
			 * previously initialised queues.
			 */
			xenvif_deinit_queue(queue);
			be->vif->num_queues = queue_index;
			goto err;
		}
	}

#ifdef CONFIG_DEBUG_FS
	xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */

	/* Initialisation completed, tell core driver the number of
	 * active queues.
	 */
	rtnl_lock();
	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
	rtnl_unlock();

	xenvif_carrier_on(be->vif);

	unregister_hotplug_status_watch(be);
	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
				   hotplug_status_changed,
				   "%s/%s", dev->nodename, "hotplug-status");
	if (!err)
		be->have_hotplug_status_watch = 1;

	netif_tx_wake_all_queues(be->vif->dev);

	return;

err:
	if (be->vif->num_queues > 0)
		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
		xenvif_deinit_queue(&be->vif->queues[queue_index]);
	vfree(be->vif->queues);
	be->vif->queues = NULL;
	be->vif->num_queues = 0;
	xenvif_disconnect_ctrl(be->vif);
	return;
}
/*
 * Read one queue's ring references and event channel(s) from xenstore
 * and map them via xenvif_connect_data().  Single-queue frontends keep
 * their keys at the toplevel; multi-queue frontends use a "queue-N"
 * subdirectory.  Split TX/RX event channels are preferred, falling back
 * to a single shared channel.  Returns 0 on success, negative errno on
 * failure (reported via xenbus_dev_fatal()).
 */
static int connect_data_rings(struct backend_info *be,
			      struct xenvif_queue *queue)
{
	struct xenbus_device *dev = be->dev;
	unsigned int num_queues = queue->vif->num_queues;
	unsigned long tx_ring_ref, rx_ring_ref;
	unsigned int tx_evtchn, rx_evtchn;
	int err;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */

	/* If the frontend requested 1 queue, or we have fallen back
	 * to single queue due to lack of frontend support for multi-
	 * queue, expect the remaining XenStore keys in the toplevel
	 * directory. Otherwise, expect them in a subdirectory called
	 * queue-N.
	 */
	if (num_queues == 1) {
		xspath = kstrdup(dev->otherend, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
	} else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kzalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM,
					 "reading ring references");
			return -ENOMEM;
		}
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
			 queue->id);
	}

	err = xenbus_gather(XBT_NIL, xspath,
			    "tx-ring-ref", "%lu", &tx_ring_ref,
			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref",
				 xspath);
		goto err;
	}

	/* Try split event channels first, then single event channel. */
	err = xenbus_gather(XBT_NIL, xspath,
			    "event-channel-tx", "%u", &tx_evtchn,
			    "event-channel-rx", "%u", &rx_evtchn, NULL);
	if (err < 0) {
		err = xenbus_scanf(XBT_NIL, xspath,
				   "event-channel", "%u", &tx_evtchn);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "reading %s/event-channel(-tx/rx)",
					 xspath);
			goto err;
		}
		/* Shared channel: RX notifications use the TX channel. */
		rx_evtchn = tx_evtchn;
	}

	/* Map the shared frame, irq etc. */
	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
				  tx_evtchn, rx_evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "mapping shared-frames %lu/%lu port tx %u rx %u",
				 tx_ring_ref, rx_ring_ref,
				 tx_evtchn, rx_evtchn);
		goto err;
	}

	err = 0;
err: /* Regular return falls through with err == 0 */
	kfree(xspath);
	return err;
}
  799. static int read_xenbus_vif_flags(struct backend_info *be)
  800. {
  801. struct xenvif *vif = be->vif;
  802. struct xenbus_device *dev = be->dev;
  803. unsigned int rx_copy;
  804. int err;
  805. err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
  806. &rx_copy);
  807. if (err == -ENOENT) {
  808. err = 0;
  809. rx_copy = 0;
  810. }
  811. if (err < 0) {
  812. xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
  813. dev->otherend);
  814. return err;
  815. }
  816. if (!rx_copy)
  817. return -EOPNOTSUPP;
  818. if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
  819. /* - Reduce drain timeout to poll more frequently for
  820. * Rx requests.
  821. * - Disable Rx stall detection.
  822. */
  823. be->vif->drain_timeout = msecs_to_jiffies(30);
  824. be->vif->stall_timeout = 0;
  825. }
  826. vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
  827. vif->gso_mask = 0;
  828. if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
  829. vif->gso_mask |= GSO_BIT(TCPV4);
  830. if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
  831. vif->gso_mask |= GSO_BIT(TCPV6);
  832. vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
  833. "feature-no-csum-offload", 0);
  834. vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
  835. "feature-ipv6-csum-offload", 0);
  836. read_xenbus_frontend_xdp(be, dev);
  837. return 0;
  838. }
/* Tear down a backend device.  Statement order matters here: the
 * hotplug-status watch is unregistered before its node is removed, and
 * the vif is disconnected before it is freed.  Also invoked from
 * netback_probe()'s failure path.
 */
static int netback_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	unregister_hotplug_status_watch(be);
	xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
	if (be->vif) {
		/* Tell userspace the interface is going away before
		 * quiescing and releasing it.
		 */
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		backend_disconnect(be);
		xenvif_free(be->vif);
		be->vif = NULL;
	}
	kfree(be->hotplug_script);
	kfree(be);
	/* Clear drvdata so nothing can reach the freed backend_info. */
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}
  855. /*
  856. * Entry point to this code when a new device is created. Allocate the basic
  857. * structures and switch to InitWait.
  858. */
  859. static int netback_probe(struct xenbus_device *dev,
  860. const struct xenbus_device_id *id)
  861. {
  862. const char *message;
  863. struct xenbus_transaction xbt;
  864. int err;
  865. int sg;
  866. const char *script;
  867. struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);
  868. if (!be) {
  869. xenbus_dev_fatal(dev, -ENOMEM,
  870. "allocating backend structure");
  871. return -ENOMEM;
  872. }
  873. be->dev = dev;
  874. dev_set_drvdata(&dev->dev, be);
  875. sg = 1;
  876. do {
  877. err = xenbus_transaction_start(&xbt);
  878. if (err) {
  879. xenbus_dev_fatal(dev, err, "starting transaction");
  880. goto fail;
  881. }
  882. err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
  883. if (err) {
  884. message = "writing feature-sg";
  885. goto abort_transaction;
  886. }
  887. err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
  888. "%d", sg);
  889. if (err) {
  890. message = "writing feature-gso-tcpv4";
  891. goto abort_transaction;
  892. }
  893. err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
  894. "%d", sg);
  895. if (err) {
  896. message = "writing feature-gso-tcpv6";
  897. goto abort_transaction;
  898. }
  899. /* We support partial checksum setup for IPv6 packets */
  900. err = xenbus_printf(xbt, dev->nodename,
  901. "feature-ipv6-csum-offload",
  902. "%d", 1);
  903. if (err) {
  904. message = "writing feature-ipv6-csum-offload";
  905. goto abort_transaction;
  906. }
  907. /* We support rx-copy path. */
  908. err = xenbus_printf(xbt, dev->nodename,
  909. "feature-rx-copy", "%d", 1);
  910. if (err) {
  911. message = "writing feature-rx-copy";
  912. goto abort_transaction;
  913. }
  914. /* we can adjust a headroom for netfront XDP processing */
  915. err = xenbus_printf(xbt, dev->nodename,
  916. "feature-xdp-headroom", "%d",
  917. provides_xdp_headroom);
  918. if (err) {
  919. message = "writing feature-xdp-headroom";
  920. goto abort_transaction;
  921. }
  922. /* We don't support rx-flip path (except old guests who
  923. * don't grok this feature flag).
  924. */
  925. err = xenbus_printf(xbt, dev->nodename,
  926. "feature-rx-flip", "%d", 0);
  927. if (err) {
  928. message = "writing feature-rx-flip";
  929. goto abort_transaction;
  930. }
  931. /* We support dynamic multicast-control. */
  932. err = xenbus_printf(xbt, dev->nodename,
  933. "feature-multicast-control", "%d", 1);
  934. if (err) {
  935. message = "writing feature-multicast-control";
  936. goto abort_transaction;
  937. }
  938. err = xenbus_printf(xbt, dev->nodename,
  939. "feature-dynamic-multicast-control",
  940. "%d", 1);
  941. if (err) {
  942. message = "writing feature-dynamic-multicast-control";
  943. goto abort_transaction;
  944. }
  945. err = xenbus_transaction_end(xbt, 0);
  946. } while (err == -EAGAIN);
  947. if (err) {
  948. xenbus_dev_fatal(dev, err, "completing transaction");
  949. goto fail;
  950. }
  951. /* Split event channels support, this is optional so it is not
  952. * put inside the above loop.
  953. */
  954. err = xenbus_printf(XBT_NIL, dev->nodename,
  955. "feature-split-event-channels",
  956. "%u", separate_tx_rx_irq);
  957. if (err)
  958. pr_debug("Error writing feature-split-event-channels\n");
  959. /* Multi-queue support: This is an optional feature. */
  960. err = xenbus_printf(XBT_NIL, dev->nodename,
  961. "multi-queue-max-queues", "%u", xenvif_max_queues);
  962. if (err)
  963. pr_debug("Error writing multi-queue-max-queues\n");
  964. err = xenbus_printf(XBT_NIL, dev->nodename,
  965. "feature-ctrl-ring",
  966. "%u", true);
  967. if (err)
  968. pr_debug("Error writing feature-ctrl-ring\n");
  969. backend_switch_state(be, XenbusStateInitWait);
  970. script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
  971. if (IS_ERR(script)) {
  972. err = PTR_ERR(script);
  973. xenbus_dev_fatal(dev, err, "reading script");
  974. goto fail;
  975. }
  976. be->hotplug_script = script;
  977. /* This kicks hotplug scripts, so do it immediately. */
  978. err = backend_create_xenvif(be);
  979. if (err)
  980. goto fail;
  981. return 0;
  982. abort_transaction:
  983. xenbus_transaction_end(xbt, 1);
  984. xenbus_dev_fatal(dev, err, "%s", message);
  985. fail:
  986. pr_debug("failed\n");
  987. netback_remove(dev);
  988. return err;
  989. }
/* xenbus IDs this driver binds to: the "vif" backend device type.
 * The empty entry terminates the table.
 */
static const struct xenbus_device_id netback_ids[] = {
	{ "vif" },
	{ "" }
};
/* xenbus driver ops for the netback backend; dispatched by the xenbus
 * core for the device IDs listed in netback_ids.
 */
static struct xenbus_driver netback_driver = {
	.ids = netback_ids,
	.probe = netback_probe,
	.remove = netback_remove,
	.uevent = netback_uevent,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
};
  1002. int xenvif_xenbus_init(void)
  1003. {
  1004. return xenbus_register_backend(&netback_driver);
  1005. }
  1006. void xenvif_xenbus_fini(void)
  1007. {
  1008. return xenbus_unregister_driver(&netback_driver);
  1009. }