// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>

static LIST_HEAD(mdev_head);
/* A global rw semaphore that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);

void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);

	/* Check override first, and if set, only use the named driver */
	if (vdev->driver_override)
		return strcmp(vdev->driver_override, drv->name) == 0;

	/* Currently devices must be supported by all vDPA bus drivers */
	return 1;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);
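
/*
 * Example (an illustrative sketch, not part of the original file): the
 * driver_override attribute above follows the usual bus convention, so a
 * device can be steered to a specific vDPA bus driver from user space.
 * The device name "vdpa0" and the "vhost_vdpa" driver are assumptions for
 * the sake of the example; drivers_probe is the generic bus attribute.
 *
 *	# echo vhost_vdpa > /sys/bus/vdpa/devices/vdpa0/driver_override
 *	# echo vdpa0 > /sys/bus/vdpa/drivers_probe
 */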

static struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	/* ida_free() pairs with the ida_alloc() done at allocation time */
	ida_free(&vdpa_index_ida, vdev->index);
	kfree(vdev->driver_override);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * calling this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or the
 * ida allocation fails.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* Virtual addresses only work for devices that use an on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;
	vdev->ngroups = ngroups;
	vdev->nas = nas;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	init_rwsem(&vdev->cf_lock);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_free(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
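
/*
 * Example (an illustrative sketch, not part of the original file): a parent
 * driver typically embeds struct vdpa_device at the start of its own state
 * so both are allocated together. The struct my_vdpa type, my_vdpa_ops and
 * the group/address-space counts below are hypothetical.
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;	// must be the first member
 *		void __iomem *regs;
 *	};
 *
 *	static struct my_vdpa *my_dev_create(struct device *parent)
 *	{
 *		struct my_vdpa *my;
 *
 *		// vdpa_alloc_device() wraps __vdpa_alloc_device() and
 *		// derives the allocation size from the container type.
 *		my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *				       &my_vdpa_ops, 1, 1, NULL, false);
 *		if (IS_ERR(my))
 *			return NULL;
 *		return my;
 *	}
 */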

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * Caller must have a successful call to vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus.
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus.
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of the management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when it fails to do the registration.
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
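
/*
 * Example (an illustrative sketch, not part of the original file): a minimal
 * vDPA bus driver skeleton. vdpa_register_driver()/module_vdpa_driver() are
 * the wrappers around __vdpa_register_driver() that pass THIS_MODULE; the
 * my_* names are hypothetical.
 *
 *	static int my_vdpa_probe(struct vdpa_device *vdev)
 *	{
 *		// attach the device to a backend, e.g. virtio or vhost
 *		return 0;
 *	}
 *
 *	static void my_vdpa_remove(struct vdpa_device *vdev)
 *	{
 *	}
 *
 *	static struct vdpa_driver my_vdpa_driver = {
 *		.driver = {
 *			.name = "my_vdpa",
 *		},
 *		.probe = my_vdpa_probe,
 *		.remove = my_vdpa_remove,
 *	};
 *	module_vdpa_driver(my_vdpa_driver);
 */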

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 * are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
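
/*
 * Example (an illustrative sketch, not part of the original file): a
 * management device advertises dev_add()/dev_del() ops and then registers
 * itself so it shows up in "vdpa mgmtdev show". All my_* names are
 * hypothetical.
 *
 *	static int my_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
 *			      const struct vdpa_dev_set_config *config)
 *	{
 *		// allocate with vdpa_alloc_device(), set vdev->mdev = mdev,
 *		// then call _vdpa_register_device(vdev, nvqs)
 *		return 0;
 *	}
 *
 *	static void my_dev_del(struct vdpa_mgmt_dev *mdev,
 *			       struct vdpa_device *vdev)
 *	{
 *		_vdpa_unregister_device(vdev);
 *	}
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
 *		.dev_add = my_dev_add,
 *		.dev_del = my_dev_del,
 *	};
 */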

static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If they do happen, we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}

/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);

/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
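
/*
 * Example (an illustrative sketch, not part of the original file): reading a
 * single virtio-net config field through the locked accessor. The helper
 * below is hypothetical; it relies on the standard struct virtio_net_config
 * layout from the virtio spec.
 *
 *	static void my_read_mac(struct vdpa_device *vdev, u8 mac[ETH_ALEN])
 *	{
 *		vdpa_get_config(vdev,
 *				offsetof(struct virtio_net_config, mac),
 *				mac, ETH_ALEN);
 *	}
 */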

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* Bus name is optional for the simulated management device, so a
	 * device only matches when the presence of a bus name in the request
	 * matches the presence of a bus on the device.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	u64 supported_classes = 0;
	void *hdr;
	int i = 0;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	while (mdev->id_table[i].device) {
		if (mdev->id_table[i].device <= 63)
			supported_classes |= BIT_ULL(mdev->id_table[i].device);
		i++;
	}

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      supported_classes, VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	cb->args[0] = idx;
	return msg->len;
}

#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	const u8 *macaddr;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}
	if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
		config.device_features =
			nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
	}

	/* Skip the capability check if the user didn't ask to configure any
	 * device networking attributes. The user has likely configured such
	 * attributes through a device specific method, or relies on the
	 * device defaults.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}
	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "Not all provided attributes are supported");
		err = -EOPNOTSUPP;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created devices can be deleted by the user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}

static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
	    (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}

static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->mtu);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
}

static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
		return 0;

	return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
		       sizeof(config->mac), config->mac);
}

static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features_device;
	u16 val_u16;

	vdev->config->get_config(vdev, 0, &config, sizeof(config));

	val_u16 = __virtio16_to_cpu(true, config.status);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
		return -EMSGSIZE;

	features_device = vdev->config->get_device_features(vdev);

	if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}

static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u64 features_driver;
	u8 status = 0;
	u32 device_id;
	void *hdr;
	int err;

	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* only read driver features after the feature negotiation is done */
	status = vdev->config->get_status(vdev);
	if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
		features_driver = vdev->config->get_driver_features(vdev);
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
				      VDPA_ATTR_PAD)) {
			err = -EMSGSIZE;
			goto msg_err;
		}
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}

static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
			       struct genl_info *info, u32 index)
{
	struct virtio_net_config config = {};
	u64 features;
	u16 max_vqp;
	u8 status;
	int err;

	status = vdev->config->get_status(vdev);
	if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
		NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
		return -EAGAIN;
	}
	vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));

	max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
	if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
		return -EMSGSIZE;

	features = vdev->config->get_driver_features(vdev);
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
			      features, VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
		return -EMSGSIZE;

	err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
	if (err)
		return err;

	return 0;
}

static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
			     struct genl_info *info, u32 index)
{
	int err;

	down_read(&vdev->cf_lock);
	if (!vdev->config->get_vendor_vq_stats) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = vdpa_fill_stats_rec(vdev, msg, info, index);
out:
	up_read(&vdev->cf_lock);
	return err;
}

static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
				      struct sk_buff *msg,
				      struct genl_info *info, u32 index)
{
	u32 device_id;
	void *hdr;
	int err;
	u32 portid = info->snd_portid;
	u32 seq = info->snd_seq;
	u32 flags = 0;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_VSTATS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto undo_msg;
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
			err = -ERANGE;
			break;
		}

		err = vendor_stats_fill(vdev, msg, info, index);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	genlmsg_end(msg, hdr);

	return err;

undo_msg:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
				   0, info->extack);
	if (!err)
		err = genlmsg_reply(msg, info);

mdev_err:
	put_device(dev);
dev_err:
	up_read(&vdpa_dev_lock);
	if (err)
		nlmsg_free(msg);
	return err;
}

static int vdpa_dev_config_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
				   info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
				   info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}

static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
					  struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	u32 index;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
		return -EINVAL;

	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);

	put_device(dev);
	up_read(&vdpa_dev_lock);

	return err;

mdev_err:
	put_device(dev);
dev_err:
	nlmsg_free(msg);
	up_read(&vdpa_dev_lock);
	return err;
}

static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
	[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
	[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
	[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
	[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
	/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
	[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
	[VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
	[VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
};

static const struct genl_ops vdpa_nl_ops[] = {
	{
		.cmd = VDPA_CMD_MGMTDEV_GET,
		.doit = vdpa_nl_cmd_mgmtdev_get_doit,
		.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_NEW,
		.doit = vdpa_nl_cmd_dev_add_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_DEL,
		.doit = vdpa_nl_cmd_dev_del_set_doit,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = VDPA_CMD_DEV_GET,
		.doit = vdpa_nl_cmd_dev_get_doit,
		.dumpit = vdpa_nl_cmd_dev_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_CONFIG_GET,
		.doit = vdpa_nl_cmd_dev_config_get_doit,
		.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
	},
	{
		.cmd = VDPA_CMD_DEV_VSTATS_GET,
		.doit = vdpa_nl_cmd_dev_stats_get_doit,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family vdpa_nl_family __ro_after_init = {
	.name = VDPA_GENL_NAME,
	.version = VDPA_GENL_VERSION,
	.maxattr = VDPA_ATTR_MAX,
	.policy = vdpa_nl_policy,
	.netnsok = false,
	.module = THIS_MODULE,
	.ops = vdpa_nl_ops,
	.n_ops = ARRAY_SIZE(vdpa_nl_ops),
	.resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};
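
/*
 * Example (illustrative, not part of the original file): these generic
 * netlink commands are typically driven from user space with the "vdpa"
 * tool shipped with iproute2; the "vdpasim_net" mgmtdev name below is an
 * assumption (it is provided by the vdpa_sim_net simulator when loaded).
 *
 *	# vdpa mgmtdev show
 *	# vdpa dev add name vdpa0 mgmtdev vdpasim_net
 *	# vdpa dev config show vdpa0
 *	# vdpa dev del vdpa0
 */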

static int vdpa_init(void)
{
	int err;

	err = bus_register(&vdpa_bus);
	if (err)
		return err;
	err = genl_register_family(&vdpa_nl_family);
	if (err)
		goto err;
	return 0;

err:
	bus_unregister(&vdpa_bus);
	return err;
}

static void __exit vdpa_exit(void)
{
	genl_unregister_family(&vdpa_nl_family);
	bus_unregister(&vdpa_bus);
	ida_destroy(&vdpa_index_ida);
}
core_initcall(vdpa_init);
module_exit(vdpa_exit);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL v2");