dma_port.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <[email protected]>
 *          Mika Westerberg <[email protected]>
 */
  9. #include <linux/delay.h>
  10. #include <linux/slab.h>
  11. #include "dma_port.h"
  12. #include "tb_regs.h"
  13. #define DMA_PORT_CAP 0x3e
  14. #define MAIL_DATA 1
  15. #define MAIL_DATA_DWORDS 16
  16. #define MAIL_IN 17
  17. #define MAIL_IN_CMD_SHIFT 28
  18. #define MAIL_IN_CMD_MASK GENMASK(31, 28)
  19. #define MAIL_IN_CMD_FLASH_WRITE 0x0
  20. #define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
  21. #define MAIL_IN_CMD_FLASH_READ 0x2
  22. #define MAIL_IN_CMD_POWER_CYCLE 0x4
  23. #define MAIL_IN_DWORDS_SHIFT 24
  24. #define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
  25. #define MAIL_IN_ADDRESS_SHIFT 2
  26. #define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
  27. #define MAIL_IN_CSS BIT(1)
  28. #define MAIL_IN_OP_REQUEST BIT(0)
  29. #define MAIL_OUT 18
  30. #define MAIL_OUT_STATUS_RESPONSE BIT(29)
  31. #define MAIL_OUT_STATUS_CMD_SHIFT 4
  32. #define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
  33. #define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
  34. #define MAIL_OUT_STATUS_COMPLETED 0
  35. #define MAIL_OUT_STATUS_ERR_AUTH 1
  36. #define MAIL_OUT_STATUS_ERR_ACCESS 2
  37. #define DMA_PORT_TIMEOUT 5000 /* ms */
  38. #define DMA_PORT_RETRIES 3
  39. /**
  40. * struct tb_dma_port - DMA control port
  41. * @sw: Switch the DMA port belongs to
  42. * @port: Switch port number where DMA capability is found
  43. * @base: Start offset of the mailbox registers
  44. * @buf: Temporary buffer to store a single block
  45. */
  46. struct tb_dma_port {
  47. struct tb_switch *sw;
  48. u8 port;
  49. u32 base;
  50. u8 *buf;
  51. };
  52. /*
  53. * When the switch is in safe mode it supports very little functionality
  54. * so we don't validate that much here.
  55. */
  56. static bool dma_port_match(const struct tb_cfg_request *req,
  57. const struct ctl_pkg *pkg)
  58. {
  59. u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
  60. if (pkg->frame.eof == TB_CFG_PKG_ERROR)
  61. return true;
  62. if (pkg->frame.eof != req->response_type)
  63. return false;
  64. if (route != tb_cfg_get_route(req->request))
  65. return false;
  66. if (pkg->frame.size != req->response_size)
  67. return false;
  68. return true;
  69. }
  70. static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
  71. {
  72. memcpy(req->response, pkg->buffer, req->response_size);
  73. return true;
  74. }
  75. static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
  76. u32 port, u32 offset, u32 length, int timeout_msec)
  77. {
  78. struct cfg_read_pkg request = {
  79. .header = tb_cfg_make_header(route),
  80. .addr = {
  81. .seq = 1,
  82. .port = port,
  83. .space = TB_CFG_PORT,
  84. .offset = offset,
  85. .length = length,
  86. },
  87. };
  88. struct tb_cfg_request *req;
  89. struct cfg_write_pkg reply;
  90. struct tb_cfg_result res;
  91. req = tb_cfg_request_alloc();
  92. if (!req)
  93. return -ENOMEM;
  94. req->match = dma_port_match;
  95. req->copy = dma_port_copy;
  96. req->request = &request;
  97. req->request_size = sizeof(request);
  98. req->request_type = TB_CFG_PKG_READ;
  99. req->response = &reply;
  100. req->response_size = 12 + 4 * length;
  101. req->response_type = TB_CFG_PKG_READ;
  102. res = tb_cfg_request_sync(ctl, req, timeout_msec);
  103. tb_cfg_request_put(req);
  104. if (res.err)
  105. return res.err;
  106. memcpy(buffer, &reply.data, 4 * length);
  107. return 0;
  108. }
  109. static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
  110. u32 port, u32 offset, u32 length, int timeout_msec)
  111. {
  112. struct cfg_write_pkg request = {
  113. .header = tb_cfg_make_header(route),
  114. .addr = {
  115. .seq = 1,
  116. .port = port,
  117. .space = TB_CFG_PORT,
  118. .offset = offset,
  119. .length = length,
  120. },
  121. };
  122. struct tb_cfg_request *req;
  123. struct cfg_read_pkg reply;
  124. struct tb_cfg_result res;
  125. memcpy(&request.data, buffer, length * 4);
  126. req = tb_cfg_request_alloc();
  127. if (!req)
  128. return -ENOMEM;
  129. req->match = dma_port_match;
  130. req->copy = dma_port_copy;
  131. req->request = &request;
  132. req->request_size = 12 + 4 * length;
  133. req->request_type = TB_CFG_PKG_WRITE;
  134. req->response = &reply;
  135. req->response_size = sizeof(reply);
  136. req->response_type = TB_CFG_PKG_WRITE;
  137. res = tb_cfg_request_sync(ctl, req, timeout_msec);
  138. tb_cfg_request_put(req);
  139. return res.err;
  140. }
  141. static int dma_find_port(struct tb_switch *sw)
  142. {
  143. static const int ports[] = { 3, 5, 7 };
  144. int i;
  145. /*
  146. * The DMA (NHI) port is either 3, 5 or 7 depending on the
  147. * controller. Try all of them.
  148. */
  149. for (i = 0; i < ARRAY_SIZE(ports); i++) {
  150. u32 type;
  151. int ret;
  152. ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
  153. 2, 1, DMA_PORT_TIMEOUT);
  154. if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
  155. return ports[i];
  156. }
  157. return -ENODEV;
  158. }
  159. /**
  160. * dma_port_alloc() - Finds DMA control port from a switch pointed by route
  161. * @sw: Switch from where find the DMA port
  162. *
  163. * Function checks if the switch NHI port supports DMA configuration
  164. * based mailbox capability and if it does, allocates and initializes
  165. * DMA port structure. Returns %NULL if the capabity was not found.
  166. *
  167. * The DMA control port is functional also when the switch is in safe
  168. * mode.
  169. */
  170. struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
  171. {
  172. struct tb_dma_port *dma;
  173. int port;
  174. port = dma_find_port(sw);
  175. if (port < 0)
  176. return NULL;
  177. dma = kzalloc(sizeof(*dma), GFP_KERNEL);
  178. if (!dma)
  179. return NULL;
  180. dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
  181. if (!dma->buf) {
  182. kfree(dma);
  183. return NULL;
  184. }
  185. dma->sw = sw;
  186. dma->port = port;
  187. dma->base = DMA_PORT_CAP;
  188. return dma;
  189. }
  190. /**
  191. * dma_port_free() - Release DMA control port structure
  192. * @dma: DMA control port
  193. */
  194. void dma_port_free(struct tb_dma_port *dma)
  195. {
  196. if (dma) {
  197. kfree(dma->buf);
  198. kfree(dma);
  199. }
  200. }
  201. static int dma_port_wait_for_completion(struct tb_dma_port *dma,
  202. unsigned int timeout)
  203. {
  204. unsigned long end = jiffies + msecs_to_jiffies(timeout);
  205. struct tb_switch *sw = dma->sw;
  206. do {
  207. int ret;
  208. u32 in;
  209. ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
  210. dma->base + MAIL_IN, 1, 50);
  211. if (ret) {
  212. if (ret != -ETIMEDOUT)
  213. return ret;
  214. } else if (!(in & MAIL_IN_OP_REQUEST)) {
  215. return 0;
  216. }
  217. usleep_range(50, 100);
  218. } while (time_before(jiffies, end));
  219. return -ETIMEDOUT;
  220. }
  221. static int status_to_errno(u32 status)
  222. {
  223. switch (status & MAIL_OUT_STATUS_MASK) {
  224. case MAIL_OUT_STATUS_COMPLETED:
  225. return 0;
  226. case MAIL_OUT_STATUS_ERR_AUTH:
  227. return -EINVAL;
  228. case MAIL_OUT_STATUS_ERR_ACCESS:
  229. return -EACCES;
  230. }
  231. return -EIO;
  232. }
  233. static int dma_port_request(struct tb_dma_port *dma, u32 in,
  234. unsigned int timeout)
  235. {
  236. struct tb_switch *sw = dma->sw;
  237. u32 out;
  238. int ret;
  239. ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
  240. dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
  241. if (ret)
  242. return ret;
  243. ret = dma_port_wait_for_completion(dma, timeout);
  244. if (ret)
  245. return ret;
  246. ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
  247. dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
  248. if (ret)
  249. return ret;
  250. return status_to_errno(out);
  251. }
  252. static int dma_port_flash_read_block(void *data, unsigned int dwaddress,
  253. void *buf, size_t dwords)
  254. {
  255. struct tb_dma_port *dma = data;
  256. struct tb_switch *sw = dma->sw;
  257. int ret;
  258. u32 in;
  259. in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
  260. if (dwords < MAIL_DATA_DWORDS)
  261. in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
  262. in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
  263. in |= MAIL_IN_OP_REQUEST;
  264. ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
  265. if (ret)
  266. return ret;
  267. return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
  268. dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
  269. }
  270. static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
  271. const void *buf, size_t dwords)
  272. {
  273. struct tb_dma_port *dma = data;
  274. struct tb_switch *sw = dma->sw;
  275. int ret;
  276. u32 in;
  277. /* Write the block to MAIL_DATA registers */
  278. ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
  279. dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
  280. if (ret)
  281. return ret;
  282. in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
  283. /* CSS header write is always done to the same magic address */
  284. if (dwaddress >= DMA_PORT_CSS_ADDRESS)
  285. in |= MAIL_IN_CSS;
  286. in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
  287. in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
  288. in |= MAIL_IN_OP_REQUEST;
  289. return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
  290. }
  291. /**
  292. * dma_port_flash_read() - Read from active flash region
  293. * @dma: DMA control port
  294. * @address: Address relative to the start of active region
  295. * @buf: Buffer where the data is read
  296. * @size: Size of the buffer
  297. */
  298. int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
  299. void *buf, size_t size)
  300. {
  301. return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
  302. dma_port_flash_read_block, dma);
  303. }
  304. /**
  305. * dma_port_flash_write() - Write to non-active flash region
  306. * @dma: DMA control port
  307. * @address: Address relative to the start of non-active region
  308. * @buf: Data to write
  309. * @size: Size of the buffer
  310. *
  311. * Writes block of data to the non-active flash region of the switch. If
  312. * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
  313. * using CSS command.
  314. */
  315. int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
  316. const void *buf, size_t size)
  317. {
  318. if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE)
  319. return -E2BIG;
  320. return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES,
  321. dma_port_flash_write_block, dma);
  322. }
  323. /**
  324. * dma_port_flash_update_auth() - Starts flash authenticate cycle
  325. * @dma: DMA control port
  326. *
  327. * Starts the flash update authentication cycle. If the image in the
  328. * non-active area was valid, the switch starts upgrade process where
  329. * active and non-active area get swapped in the end. Caller should call
  330. * dma_port_flash_update_auth_status() to get status of this command.
  331. * This is because if the switch in question is root switch the
  332. * thunderbolt host controller gets reset as well.
  333. */
  334. int dma_port_flash_update_auth(struct tb_dma_port *dma)
  335. {
  336. u32 in;
  337. in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
  338. in |= MAIL_IN_OP_REQUEST;
  339. return dma_port_request(dma, in, 150);
  340. }
  341. /**
  342. * dma_port_flash_update_auth_status() - Reads status of update auth command
  343. * @dma: DMA control port
  344. * @status: Status code of the operation
  345. *
  346. * The function checks if there is status available from the last update
  347. * auth command. Returns %0 if there is no status and no further
  348. * action is required. If there is status, %1 is returned instead and
  349. * @status holds the failure code.
  350. *
  351. * Negative return means there was an error reading status from the
  352. * switch.
  353. */
  354. int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
  355. {
  356. struct tb_switch *sw = dma->sw;
  357. u32 out, cmd;
  358. int ret;
  359. ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
  360. dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
  361. if (ret)
  362. return ret;
  363. /* Check if the status relates to flash update auth */
  364. cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
  365. if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
  366. if (status)
  367. *status = out & MAIL_OUT_STATUS_MASK;
  368. /* Reset is needed in any case */
  369. return 1;
  370. }
  371. return 0;
  372. }
  373. /**
  374. * dma_port_power_cycle() - Power cycles the switch
  375. * @dma: DMA control port
  376. *
  377. * Triggers power cycle to the switch.
  378. */
  379. int dma_port_power_cycle(struct tb_dma_port *dma)
  380. {
  381. u32 in;
  382. in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
  383. in |= MAIL_IN_OP_REQUEST;
  384. return dma_port_request(dma, in, 150);
  385. }