  1. // SPDX-License-Identifier: GPL-2.0-only
  2. // Copyright (c) 2021 Intel Corporation
  3. #include <linux/bug.h>
  4. #include <linux/export.h>
  5. #include <linux/pci.h>
  6. #include <linux/peci.h>
  7. #include <linux/slab.h>
  8. #include <linux/types.h>
  9. #include <asm/unaligned.h>
  10. #include "internal.h"
/*
 * PECI command codes and fixed wire lengths (in bytes).
 * *_WR_LEN counts the TX payload including the command byte; *_RD_LEN the
 * expected RX payload.  *_LEN_BASE values are added to the variable data
 * length (see the __*_read() helpers below).
 */
#define PECI_GET_DIB_CMD 0xf7
#define PECI_GET_DIB_WR_LEN 1
#define PECI_GET_DIB_RD_LEN 8
#define PECI_GET_TEMP_CMD 0x01
#define PECI_GET_TEMP_WR_LEN 1
#define PECI_GET_TEMP_RD_LEN 2
#define PECI_RDPKGCFG_CMD 0xa1
#define PECI_RDPKGCFG_WR_LEN 5
#define PECI_RDPKGCFG_RD_LEN_BASE 1
#define PECI_WRPKGCFG_CMD 0xa5
#define PECI_WRPKGCFG_WR_LEN_BASE 6
#define PECI_WRPKGCFG_RD_LEN 1
#define PECI_RDIAMSR_CMD 0xb1
#define PECI_RDIAMSR_WR_LEN 5
#define PECI_RDIAMSR_RD_LEN 9
#define PECI_WRIAMSR_CMD 0xb5
#define PECI_RDIAMSREX_CMD 0xd1
#define PECI_RDIAMSREX_WR_LEN 6
#define PECI_RDIAMSREX_RD_LEN 9
#define PECI_RDPCICFG_CMD 0x61
#define PECI_RDPCICFG_WR_LEN 6
#define PECI_RDPCICFG_RD_LEN 5
#define PECI_RDPCICFG_RD_LEN_MAX 24
#define PECI_WRPCICFG_CMD 0x65
#define PECI_RDPCICFGLOCAL_CMD 0xe1
#define PECI_RDPCICFGLOCAL_WR_LEN 5
#define PECI_RDPCICFGLOCAL_RD_LEN_BASE 1
#define PECI_WRPCICFGLOCAL_CMD 0xe5
#define PECI_WRPCICFGLOCAL_WR_LEN_BASE 6
#define PECI_WRPCICFGLOCAL_RD_LEN 1

/* RdEndpointConfig() message types and address types */
#define PECI_ENDPTCFG_TYPE_LOCAL_PCI 0x03
#define PECI_ENDPTCFG_TYPE_PCI 0x04
#define PECI_ENDPTCFG_TYPE_MMIO 0x05
#define PECI_ENDPTCFG_ADDR_TYPE_PCI 0x04
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D 0x05
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q 0x06
#define PECI_RDENDPTCFG_CMD 0xc1
#define PECI_RDENDPTCFG_PCI_WR_LEN 12
#define PECI_RDENDPTCFG_MMIO_WR_LEN_BASE 10
#define PECI_RDENDPTCFG_MMIO_D_WR_LEN 14
#define PECI_RDENDPTCFG_MMIO_Q_WR_LEN 18
#define PECI_RDENDPTCFG_RD_LEN_BASE 1
#define PECI_WRENDPTCFG_CMD 0xc5
#define PECI_WRENDPTCFG_PCI_WR_LEN_BASE 13
#define PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE 15
#define PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE 19
#define PECI_WRENDPTCFG_RD_LEN 1

/* Device Specific Completion Code (CC) Definition */
#define PECI_CC_SUCCESS 0x40
#define PECI_CC_NEED_RETRY 0x80
#define PECI_CC_OUT_OF_RESOURCE 0x81
#define PECI_CC_UNAVAIL_RESOURCE 0x82
#define PECI_CC_INVALID_REQ 0x90
#define PECI_CC_MCA_ERROR 0x91
#define PECI_CC_CATASTROPHIC_MCA_ERROR 0x93
#define PECI_CC_FATAL_MCA_ERROR 0x94
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB 0x98
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR 0x9B
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA 0x9C

/* Bit 0 of the byte following the command is set on retried transfers */
#define PECI_RETRY_BIT BIT(0)

/* Total retry budget and exponential backoff bounds (jiffies) */
#define PECI_RETRY_TIMEOUT msecs_to_jiffies(700)
#define PECI_RETRY_INTERVAL_MIN msecs_to_jiffies(1)
#define PECI_RETRY_INTERVAL_MAX msecs_to_jiffies(128)
  74. static u8 peci_request_data_cc(struct peci_request *req)
  75. {
  76. return req->rx.buf[0];
  77. }
  78. /**
  79. * peci_request_status() - return -errno based on PECI completion code
  80. * @req: the PECI request that contains response data with completion code
  81. *
  82. * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we
  83. * don't expect completion code in the response.
  84. *
  85. * Return: -errno
  86. */
  87. int peci_request_status(struct peci_request *req)
  88. {
  89. u8 cc = peci_request_data_cc(req);
  90. if (cc != PECI_CC_SUCCESS)
  91. dev_dbg(&req->device->dev, "ret: %#02x\n", cc);
  92. switch (cc) {
  93. case PECI_CC_SUCCESS:
  94. return 0;
  95. case PECI_CC_NEED_RETRY:
  96. case PECI_CC_OUT_OF_RESOURCE:
  97. case PECI_CC_UNAVAIL_RESOURCE:
  98. return -EAGAIN;
  99. case PECI_CC_INVALID_REQ:
  100. return -EINVAL;
  101. case PECI_CC_MCA_ERROR:
  102. case PECI_CC_CATASTROPHIC_MCA_ERROR:
  103. case PECI_CC_FATAL_MCA_ERROR:
  104. case PECI_CC_PARITY_ERR_GPSB_OR_PMSB:
  105. case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR:
  106. case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA:
  107. return -EIO;
  108. }
  109. WARN_ONCE(1, "Unknown PECI completion code: %#02x\n", cc);
  110. return -EIO;
  111. }
  112. EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI);
  113. static int peci_request_xfer(struct peci_request *req)
  114. {
  115. struct peci_device *device = req->device;
  116. struct peci_controller *controller = to_peci_controller(device->dev.parent);
  117. int ret;
  118. mutex_lock(&controller->bus_lock);
  119. ret = controller->ops->xfer(controller, device->addr, req);
  120. mutex_unlock(&controller->bus_lock);
  121. return ret;
  122. }
  123. static int peci_request_xfer_retry(struct peci_request *req)
  124. {
  125. long wait_interval = PECI_RETRY_INTERVAL_MIN;
  126. struct peci_device *device = req->device;
  127. struct peci_controller *controller = to_peci_controller(device->dev.parent);
  128. unsigned long start = jiffies;
  129. int ret;
  130. /* Don't try to use it for ping */
  131. if (WARN_ON(req->tx.len == 0))
  132. return 0;
  133. do {
  134. ret = peci_request_xfer(req);
  135. if (ret) {
  136. dev_dbg(&controller->dev, "xfer error: %d\n", ret);
  137. return ret;
  138. }
  139. if (peci_request_status(req) != -EAGAIN)
  140. return 0;
  141. /* Set the retry bit to indicate a retry attempt */
  142. req->tx.buf[1] |= PECI_RETRY_BIT;
  143. if (schedule_timeout_interruptible(wait_interval))
  144. return -ERESTARTSYS;
  145. wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX);
  146. } while (time_before(jiffies, start + PECI_RETRY_TIMEOUT));
  147. dev_dbg(&controller->dev, "request timed out\n");
  148. return -ETIMEDOUT;
  149. }
  150. /**
  151. * peci_request_alloc() - allocate &struct peci_requests
  152. * @device: PECI device to which request is going to be sent
  153. * @tx_len: TX length
  154. * @rx_len: RX length
  155. *
  156. * Return: A pointer to a newly allocated &struct peci_request on success or NULL otherwise.
  157. */
  158. struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len)
  159. {
  160. struct peci_request *req;
  161. /*
  162. * TX and RX buffers are fixed length members of peci_request, this is
  163. * just a warn for developers to make sure to expand the buffers (or
  164. * change the allocation method) if we go over the current limit.
  165. */
  166. if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE))
  167. return NULL;
  168. /*
  169. * PECI controllers that we are using now don't support DMA, this
  170. * should be converted to DMA API once support for controllers that do
  171. * allow it is added to avoid an extra copy.
  172. */
  173. req = kzalloc(sizeof(*req), GFP_KERNEL);
  174. if (!req)
  175. return NULL;
  176. req->device = device;
  177. req->tx.len = tx_len;
  178. req->rx.len = rx_len;
  179. return req;
  180. }
  181. EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI);
  182. /**
  183. * peci_request_free() - free peci_request
  184. * @req: the PECI request to be freed
  185. */
  186. void peci_request_free(struct peci_request *req)
  187. {
  188. kfree(req);
  189. }
  190. EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI);
  191. struct peci_request *peci_xfer_get_dib(struct peci_device *device)
  192. {
  193. struct peci_request *req;
  194. int ret;
  195. req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
  196. if (!req)
  197. return ERR_PTR(-ENOMEM);
  198. req->tx.buf[0] = PECI_GET_DIB_CMD;
  199. ret = peci_request_xfer(req);
  200. if (ret) {
  201. peci_request_free(req);
  202. return ERR_PTR(ret);
  203. }
  204. return req;
  205. }
  206. EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI);
  207. struct peci_request *peci_xfer_get_temp(struct peci_device *device)
  208. {
  209. struct peci_request *req;
  210. int ret;
  211. req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
  212. if (!req)
  213. return ERR_PTR(-ENOMEM);
  214. req->tx.buf[0] = PECI_GET_TEMP_CMD;
  215. ret = peci_request_xfer(req);
  216. if (ret) {
  217. peci_request_free(req);
  218. return ERR_PTR(ret);
  219. }
  220. return req;
  221. }
  222. EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI);
  223. static struct peci_request *
  224. __pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len)
  225. {
  226. struct peci_request *req;
  227. int ret;
  228. req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len);
  229. if (!req)
  230. return ERR_PTR(-ENOMEM);
  231. req->tx.buf[0] = PECI_RDPKGCFG_CMD;
  232. req->tx.buf[1] = 0;
  233. req->tx.buf[2] = index;
  234. put_unaligned_le16(param, &req->tx.buf[3]);
  235. ret = peci_request_xfer_retry(req);
  236. if (ret) {
  237. peci_request_free(req);
  238. return ERR_PTR(ret);
  239. }
  240. return req;
  241. }
  242. static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg)
  243. {
  244. return reg | PCI_DEVID(bus, PCI_DEVFN(dev, func)) << 12;
  245. }
  246. static struct peci_request *
  247. __pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len)
  248. {
  249. struct peci_request *req;
  250. u32 pci_addr;
  251. int ret;
  252. req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN,
  253. PECI_RDPCICFGLOCAL_RD_LEN_BASE + len);
  254. if (!req)
  255. return ERR_PTR(-ENOMEM);
  256. pci_addr = __get_pci_addr(bus, dev, func, reg);
  257. req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD;
  258. req->tx.buf[1] = 0;
  259. put_unaligned_le24(pci_addr, &req->tx.buf[2]);
  260. ret = peci_request_xfer_retry(req);
  261. if (ret) {
  262. peci_request_free(req);
  263. return ERR_PTR(ret);
  264. }
  265. return req;
  266. }
  267. static struct peci_request *
  268. __ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg,
  269. u8 bus, u8 dev, u8 func, u16 reg, u8 len)
  270. {
  271. struct peci_request *req;
  272. u32 pci_addr;
  273. int ret;
  274. req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN,
  275. PECI_RDENDPTCFG_RD_LEN_BASE + len);
  276. if (!req)
  277. return ERR_PTR(-ENOMEM);
  278. pci_addr = __get_pci_addr(bus, dev, func, reg);
  279. req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
  280. req->tx.buf[1] = 0;
  281. req->tx.buf[2] = msg_type;
  282. req->tx.buf[3] = 0;
  283. req->tx.buf[4] = 0;
  284. req->tx.buf[5] = 0;
  285. req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI;
  286. req->tx.buf[7] = seg; /* PCI Segment */
  287. put_unaligned_le32(pci_addr, &req->tx.buf[8]);
  288. ret = peci_request_xfer_retry(req);
  289. if (ret) {
  290. peci_request_free(req);
  291. return ERR_PTR(ret);
  292. }
  293. return req;
  294. }
  295. static struct peci_request *
  296. __ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg,
  297. u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len)
  298. {
  299. struct peci_request *req;
  300. int ret;
  301. req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len);
  302. if (!req)
  303. return ERR_PTR(-ENOMEM);
  304. req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
  305. req->tx.buf[1] = 0;
  306. req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO;
  307. req->tx.buf[3] = 0; /* Endpoint ID */
  308. req->tx.buf[4] = 0; /* Reserved */
  309. req->tx.buf[5] = bar;
  310. req->tx.buf[6] = addr_type;
  311. req->tx.buf[7] = seg; /* PCI Segment */
  312. req->tx.buf[8] = PCI_DEVFN(dev, func);
  313. req->tx.buf[9] = bus; /* PCI Bus */
  314. if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
  315. put_unaligned_le32(offset, &req->tx.buf[10]);
  316. else
  317. put_unaligned_le64(offset, &req->tx.buf[10]);
  318. ret = peci_request_xfer_retry(req);
  319. if (ret) {
  320. peci_request_free(req);
  321. return ERR_PTR(ret);
  322. }
  323. return req;
  324. }
/* Read one byte of response payload; rx.buf[0] holds the completion code. */
u8 peci_request_data_readb(struct peci_request *req)
{
	return req->rx.buf[1];
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI);

/* Read a 16-bit little-endian value from the response payload. */
u16 peci_request_data_readw(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI);

/* Read a 32-bit little-endian value from the response payload. */
u32 peci_request_data_readl(struct peci_request *req)
{
	return get_unaligned_le32(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI);

/* Read a 64-bit little-endian value from the response payload. */
u64 peci_request_data_readq(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI);

/* GetDIB() response carries no completion code - data starts at rx.buf[0]. */
u64 peci_request_dib_read(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI);

/* GetTemp() response: signed 16-bit value, no completion code. */
s16 peci_request_temp_read(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI);
/*
 * Generate peci_xfer_pkg_cfg_<x>() - RdPkgConfig() read of sizeof(type)
 * bytes from package configuration space.
 */
#define __read_pkg_config(x, type) \
struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \
{ \
	return __pkg_cfg_read(device, index, param, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI)

__read_pkg_config(readb, u8);
__read_pkg_config(readw, u16);
__read_pkg_config(readl, u32);
__read_pkg_config(readq, u64);
/*
 * Generate peci_xfer_pci_cfg_local_<x>() - RdPCIConfigLocal() read of
 * sizeof(type) bytes of PCI configuration space.
 */
#define __read_pci_config_local(x, type) \
struct peci_request * \
peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI)

__read_pci_config_local(readb, u8);
__read_pci_config_local(readw, u16);
__read_pci_config_local(readl, u32);
/*
 * Generate peci_xfer_ep_pci_cfg_<x>() - RdEndpointConfig() PCI read of
 * sizeof(type) bytes; msg_type selects local vs. remote PCI config space.
 */
#define __read_ep_pci_config(x, msg_type, type) \
struct peci_request * \
peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI)

__read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8);
__read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16);
__read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32);
__read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8);
__read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16);
__read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32);
/*
 * Generate peci_xfer_ep_mmio<y>_<x>() - RdEndpointConfig() MMIO read.
 * type1 sizes the on-wire offset (32- or 64-bit addressing, matching
 * addr_type); type2 sizes the data being read.
 */
#define __read_ep_mmio(x, y, addr_type, type1, type2) \
struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \
						u8 bus, u8 dev, u8 func, u64 offset) \
{ \
	return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \
			      offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \
			      sizeof(type2)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI)

__read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32);
__read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32);