virtio_crypto_common.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>

/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
};
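
/* Per-device driver state for one virtio crypto device */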
struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the services mask which the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;
	u32 akcipher_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
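
/* Session information returned by the host when a symmetric session is created */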
struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

/*
 * Note: there are padding fields in the request; clear them to zero before
 * sending to the host to avoid divulging any information.
 * Ex, virtio_crypto_ctrl_request::ctrl::u::destroy_session::padding[48]
 */
struct virtio_crypto_ctrl_request {
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;
	struct completion compl;
};

struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);
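
/* State carried for each data request submitted to a dataq */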
struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};
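
/*
 * Device manager helpers: maintain the global list of probed virtio crypto
 * devices and their reference counts.
 */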
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
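
/*
 * Capability helpers: check whether a device advertises a given service and
 * algorithm, and select a started device, preferring the given node.
 */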
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
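
/* crypto_engine do_one_request() callback for symmetric cipher requests */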
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);
void virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
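
/*
 * Return the physical package id of the current CPU, used as a node hint
 * when selecting a device close to the caller.
 */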
static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}
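
/* Register/unregister the skcipher and akcipher algorithms backed by a device */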
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto);
int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto);
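
/*
 * Submit a control request on the control virtqueue and wait for the host
 * to complete it.
 */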
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req);

#endif /* _VIRTIO_CRYPTO_COMMON_H */