// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <[email protected]>
 *
 */

#include "ifcvf_base.h"

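/*
 * Bind a virtqueue's interrupt to an MSI-X vector. The device is free
 * to refuse the binding, so the vector register is read back: a result
 * of VIRTIO_MSI_NO_VECTOR tells the caller the assignment failed.
 */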
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);

	return vp_ioread16(&cfg->queue_msix_vector);
}

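/* Same read-back pattern for the config-change interrupt vector. */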
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(vector, &cfg->msix_config);

	return vp_ioread16(&cfg->msix_config);
}

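/*
 * Translate a virtio PCI capability (BAR number plus offset) into an
 * ioremapped address, rejecting BAR numbers out of range and
 * capabilities that would run past the end of their BAR.
 */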
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(hw->pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(hw->pdev, bar)) {
		IFCVF_DBG(hw->pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}

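/* Read a capability structure from PCI config space, one dword at a time. */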
static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}

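/*
 * Walk the PCI capability list and locate the vendor-specific virtio
 * capabilities (common, notify, ISR and device config), then precompute
 * each vring's notify address from its queue_notify_off and the notify
 * offset multiplier, as laid out in the virtio spec.
 */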
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}
	hw->pdev = pdev;

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					le32_to_cpu(cap.offset);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			hw->cap_dev_config_size = le32_to_cpu(cap.length);
			IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->dev_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}

	hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);

	for (i = 0; i < hw->nr_vring; i++) {
		vp_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].notify_pa = hw->notify_base_pa +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].irq = -EINVAL;
	}

	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->dev_cfg, hw->notify_off_multiplier);

	hw->vqs_reused_irq = -EINVAL;
	hw->config_irq = -EINVAL;

	return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return vp_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	vp_iowrite8(status, &hw->common_cfg->device_status);
}

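/*
 * Writing 0 to device_status resets the device; the status read-back
 * flushes the posted write so the VF is really stopped before we return.
 */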
void ifcvf_reset(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;

	ifcvf_set_status(hw, 0);
	/* flush set_status, make sure VF is stopped, reset */
	ifcvf_get_status(hw);
}

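/*
 * OR a new bit into the current device status. A status of 0 means
 * reset, which is handled by ifcvf_reset() instead.
 */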
static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	ifcvf_get_status(hw);
}

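/*
 * Device features are exposed as two 32-bit halves behind a selector
 * window: select bank 0 for bits 0-31, bank 1 for bits 32-63.
 */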
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features_lo = vp_ioread32(&cfg->device_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	features_hi = vp_ioread32(&cfg->device_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}

u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
	return hw->hw_features;
}

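/*
 * VIRTIO_F_ACCESS_PLATFORM is mandatory for vDPA: the device DMAs into
 * buffers whose addresses go through the platform IOMMU, so it must not
 * assume raw guest physical addresses. An all-zero feature set is
 * allowed through, since nothing has been negotiated yet.
 */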
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	return 0;
}

u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
	u32 net_config_size = sizeof(struct virtio_net_config);
	u32 blk_config_size = sizeof(struct virtio_blk_config);
	u32 cap_size = hw->cap_dev_config_size;
	u32 config_size;

	/* If the device advertises a config space larger than
	 * struct virtio_net_config/virtio_blk_config, cap the
	 * returned size at what the spec defines. This should
	 * never happen in practice; it is defensive programming.
	 */
	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = min(cap_size, net_config_size);
		break;
	case VIRTIO_ID_BLOCK:
		config_size = min(cap_size, blk_config_size);
		break;
	default:
		config_size = 0;
		IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}

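/*
 * Config reads are guarded by the generation counter: if the device
 * changed the config while we were copying, the generations differ and
 * the copy is retried, so the caller never sees a torn read.
 */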
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > hw->config_size);
	do {
		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = vp_ioread8(hw->dev_cfg + offset + i);

		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;

	p = src;
	WARN_ON(offset + length > hw->config_size);
	for (i = 0; i < length; i++)
		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}

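/* Driver features use the same two-bank selector scheme as device features. */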
static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}

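/*
 * Write the requested features and set FEATURES_OK, then read the
 * status back: if the device cleared FEATURES_OK, it rejected the
 * feature set and negotiation failed.
 */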
static int ifcvf_config_features(struct ifcvf_hw *hw)
{
	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		IFCVF_ERR(hw->pdev, "Failed to set FEATURES_OK status\n");
		return -EIO;
	}

	return 0;
}

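/*
 * The device keeps each queue's last_avail_idx in the live-migration
 * (LM) BAR. Queues are grouped in pairs: pair qid / 2 holds the two
 * indices, and qid % 2 selects the queue within the pair.
 */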
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u16 last_avail_idx;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	last_avail_idx = vp_ioread16(avail_idx_addr);

	return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	hw->vring[qid].last_avail_idx = num;
	vp_iowrite16(num, avail_idx_addr);

	return 0;
}

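/*
 * Program each ready vring into the device: descriptor, avail and used
 * ring addresses (written as two 32-bit halves), ring size and the
 * saved last_avail_idx, then enable the queue. The loop stops at the
 * first ring that is not marked ready.
 */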
static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	u32 i;

	cfg = hw->common_cfg;
	for (i = 0; i < hw->nr_vring; i++) {
		if (!hw->vring[i].ready)
			break;

		vp_iowrite16(i, &cfg->queue_select);
		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				     &cfg->queue_desc_hi);
		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				     &cfg->queue_avail_hi);
		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				     &cfg->queue_used_hi);
		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
		vp_iowrite16(1, &cfg->queue_enable);
	}

	return 0;
}

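/* Detach all interrupts by pointing every vector at VIRTIO_MSI_NO_VECTOR. */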
static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	u32 i;

	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
	for (i = 0; i < hw->nr_vring; i++)
		ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR);
}

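/*
 * Bring the device up with the standard virtio initialization sequence:
 * reset, then ACKNOWLEDGE and DRIVER, negotiate features (FEATURES_OK),
 * program the rings, and finally set DRIVER_OK to start the dataplane.
 */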
int ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -EINVAL;

	if (ifcvf_hw_enable(hw) < 0)
		return -EINVAL;

	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

	return 0;
}

void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}

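/* Kick the device: write the queue index to the queue's notify address. */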
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	vp_iowrite16(qid, hw->vring[qid].notify_addr);
}