// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <[email protected]>
 *   Xiao Guangrong <[email protected]>
 *   Joseph Grecco <[email protected]>
 *   Enno Luebbers <[email protected]>
 *   Tim Whisonant <[email protected]>
 *   Ananda Ravuri <[email protected]>
 *   Mitchel Henry <[email protected]>
 */
#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl-afu.h"
  19. #define PORT_ERROR_MASK 0x8
  20. #define PORT_ERROR 0x10
  21. #define PORT_FIRST_ERROR 0x18
  22. #define PORT_MALFORMED_REQ0 0x20
  23. #define PORT_MALFORMED_REQ1 0x28
  24. #define ERROR_MASK GENMASK_ULL(63, 0)
  25. /* mask or unmask port errors by the error mask register. */
  26. static void __afu_port_err_mask(struct device *dev, bool mask)
  27. {
  28. void __iomem *base;
  29. base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
  30. writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
  31. }
  32. static void afu_port_err_mask(struct device *dev, bool mask)
  33. {
  34. struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
  35. mutex_lock(&pdata->lock);
  36. __afu_port_err_mask(dev, mask);
  37. mutex_unlock(&pdata->lock);
  38. }
  39. /* clear port errors. */
  40. static int afu_port_err_clear(struct device *dev, u64 err)
  41. {
  42. struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
  43. struct platform_device *pdev = to_platform_device(dev);
  44. void __iomem *base_err, *base_hdr;
  45. int enable_ret = 0, ret = -EBUSY;
  46. u64 v;
  47. base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
  48. base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
  49. mutex_lock(&pdata->lock);
  50. /*
  51. * clear Port Errors
  52. *
  53. * - Check for AP6 State
  54. * - Halt Port by keeping Port in reset
  55. * - Set PORT Error mask to all 1 to mask errors
  56. * - Clear all errors
  57. * - Set Port mask to all 0 to enable errors
  58. * - All errors start capturing new errors
  59. * - Enable Port by pulling the port out of reset
  60. */
  61. /* if device is still in AP6 power state, can not clear any error. */
  62. v = readq(base_hdr + PORT_HDR_STS);
  63. if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
  64. dev_err(dev, "Could not clear errors, device in AP6 state.\n");
  65. goto done;
  66. }
  67. /* Halt Port by keeping Port in reset */
  68. ret = __afu_port_disable(pdev);
  69. if (ret)
  70. goto done;
  71. /* Mask all errors */
  72. __afu_port_err_mask(dev, true);
  73. /* Clear errors if err input matches with current port errors.*/
  74. v = readq(base_err + PORT_ERROR);
  75. if (v == err) {
  76. writeq(v, base_err + PORT_ERROR);
  77. v = readq(base_err + PORT_FIRST_ERROR);
  78. writeq(v, base_err + PORT_FIRST_ERROR);
  79. } else {
  80. dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
  81. __func__, v, err);
  82. ret = -EINVAL;
  83. }
  84. /* Clear mask */
  85. __afu_port_err_mask(dev, false);
  86. /* Enable the Port by clearing the reset */
  87. enable_ret = __afu_port_enable(pdev);
  88. done:
  89. mutex_unlock(&pdata->lock);
  90. return enable_ret ? enable_ret : ret;
  91. }
  92. static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
  93. char *buf)
  94. {
  95. struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
  96. void __iomem *base;
  97. u64 error;
  98. base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
  99. mutex_lock(&pdata->lock);
  100. error = readq(base + PORT_ERROR);
  101. mutex_unlock(&pdata->lock);
  102. return sprintf(buf, "0x%llx\n", (unsigned long long)error);
  103. }
  104. static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
  105. const char *buff, size_t count)
  106. {
  107. u64 value;
  108. int ret;
  109. if (kstrtou64(buff, 0, &value))
  110. return -EINVAL;
  111. ret = afu_port_err_clear(dev, value);
  112. return ret ? ret : count;
  113. }
  114. static DEVICE_ATTR_RW(errors);
  115. static ssize_t first_error_show(struct device *dev,
  116. struct device_attribute *attr, char *buf)
  117. {
  118. struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
  119. void __iomem *base;
  120. u64 error;
  121. base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
  122. mutex_lock(&pdata->lock);
  123. error = readq(base + PORT_FIRST_ERROR);
  124. mutex_unlock(&pdata->lock);
  125. return sprintf(buf, "0x%llx\n", (unsigned long long)error);
  126. }
  127. static DEVICE_ATTR_RO(first_error);
  128. static ssize_t first_malformed_req_show(struct device *dev,
  129. struct device_attribute *attr,
  130. char *buf)
  131. {
  132. struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
  133. void __iomem *base;
  134. u64 req0, req1;
  135. base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
  136. mutex_lock(&pdata->lock);
  137. req0 = readq(base + PORT_MALFORMED_REQ0);
  138. req1 = readq(base + PORT_MALFORMED_REQ1);
  139. mutex_unlock(&pdata->lock);
  140. return sprintf(buf, "0x%016llx%016llx\n",
  141. (unsigned long long)req1, (unsigned long long)req0);
  142. }
  143. static DEVICE_ATTR_RO(first_malformed_req);
  144. static struct attribute *port_err_attrs[] = {
  145. &dev_attr_errors.attr,
  146. &dev_attr_first_error.attr,
  147. &dev_attr_first_malformed_req.attr,
  148. NULL,
  149. };
  150. static umode_t port_err_attrs_visible(struct kobject *kobj,
  151. struct attribute *attr, int n)
  152. {
  153. struct device *dev = kobj_to_dev(kobj);
  154. /*
  155. * sysfs entries are visible only if related private feature is
  156. * enumerated.
  157. */
  158. if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
  159. return 0;
  160. return attr->mode;
  161. }
  162. const struct attribute_group port_err_group = {
  163. .name = "errors",
  164. .attrs = port_err_attrs,
  165. .is_visible = port_err_attrs_visible,
  166. };
  167. static int port_err_init(struct platform_device *pdev,
  168. struct dfl_feature *feature)
  169. {
  170. afu_port_err_mask(&pdev->dev, false);
  171. return 0;
  172. }
  173. static void port_err_uinit(struct platform_device *pdev,
  174. struct dfl_feature *feature)
  175. {
  176. afu_port_err_mask(&pdev->dev, true);
  177. }
  178. static long
  179. port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
  180. unsigned int cmd, unsigned long arg)
  181. {
  182. switch (cmd) {
  183. case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
  184. return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
  185. case DFL_FPGA_PORT_ERR_SET_IRQ:
  186. return dfl_feature_ioctl_set_irq(pdev, feature, arg);
  187. default:
  188. dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
  189. return -ENODEV;
  190. }
  191. }
  192. const struct dfl_feature_id port_err_id_table[] = {
  193. {.id = PORT_FEATURE_ID_ERROR,},
  194. {0,}
  195. };
  196. const struct dfl_feature_ops port_err_ops = {
  197. .init = port_err_init,
  198. .uinit = port_err_uinit,
  199. .ioctl = port_err_ioctl,
  200. };