// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
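
/*
 * Illustrative sketch (not part of the driver): the typical lifetime of a
 * request object. tb_cfg_request_alloc() returns it with one reference;
 * every additional user takes its own reference with tb_cfg_request_get()
 * and drops it with tb_cfg_request_put(). The function name below is
 * hypothetical and only demonstrates the refcounting contract.
 */
static struct tb_cfg_request * __maybe_unused example_request_lifetime(void)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();	/* refcount == 1 */
	if (!req)
		return NULL;

	tb_cfg_request_get(req);	/* refcount == 2, e.g. for a worker */
	tb_cfg_request_put(req);	/* worker is done, refcount == 1 */

	/* The caller still owns the initial reference and must put it later. */
	return req;
}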
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}
/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}
static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}
/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
						* We ignore failures during stop.
						* All rx packets are referenced
						* from ctl->rx_packets, so we do
						* not lose them.
						*/
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}
static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}
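
/*
 * Illustrative sketch (not part of the driver): firing a request
 * asynchronously. The request is assumed to have been fully filled in
 * (request/response buffers, sizes, types, match and copy callbacks)
 * exactly as tb_cfg_read_raw() below does; the callback just signals a
 * completion, which is the pattern tb_cfg_request_sync() builds on.
 * The function names are hypothetical.
 */
static void example_async_done(void *data)
{
	complete(data);	/* data points to the on-stack completion below */
}

static int __maybe_unused example_async_request(struct tb_ctl *ctl,
						struct tb_cfg_request *req)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, example_async_done, &done);
	if (ret)
		return ret;

	/* Do other work here, then wait for the reply (cancel on timeout). */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);
	return req->result.err;
}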
/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
			    void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
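
/*
 * Illustrative sketch (not part of the driver): the expected lifecycle of a
 * control channel. The callback and its data come from the caller (the
 * connection manager); the 1000 ms timeout is just an example value and the
 * function name is hypothetical.
 */
static void __maybe_unused example_ctl_lifecycle(struct tb_nhi *nhi,
						 event_cb cb, void *cb_data)
{
	struct tb_ctl *ctl;

	ctl = tb_ctl_alloc(nhi, 1000, cb, cb_data);
	if (!ctl)
		return;

	/* Rings are allocated but idle until the channel is started. */
	tb_ctl_start(ctl);

	/*
	 * ... configuration traffic and plug events flow here ...
	 *
	 * Shutdown order matters: stop the channel first so no callbacks are
	 * running, then free it (see the tb_ctl_free() kernel-doc below).
	 */
	tb_ctl_stop(ctl);
	tb_ctl_free(ctl);
}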
/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}
/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};
	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to send the reset to
 *
 * If the switch at @route is incorrectly configured then we will not receive
 * a reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}
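
/*
 * Illustrative sketch (not part of the driver): issuing a reset and handling
 * the documented -ETIMEDOUT case. Mapping a Thunderbolt error reply
 * (res.err == 1) to -EIO mirrors what other callers in this file do; the
 * function name is hypothetical.
 */
static int __maybe_unused example_reset_router(struct tb_ctl *ctl, u64 route)
{
	struct tb_cfg_result res = tb_cfg_reset(ctl, route);

	if (res.err == -ETIMEDOUT) {
		/*
		 * The router most likely reset anyway but is misconfigured;
		 * the caller should reconfigure it and may retry.
		 */
		return -ETIMEDOUT;
	}

	return res.err == 1 ? -EIO : res.err;
}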
/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read into
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}
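
/*
 * Illustrative sketch (not part of the driver): reading a single dword from
 * a router config space with the raw API. res.err is tri-state here: 0 on
 * success, 1 when the router replied with a Thunderbolt error (res.tb_error
 * then holds the code), or a negative errno such as -ETIMEDOUT. The function
 * name is hypothetical.
 */
static int __maybe_unused example_read_one_dword(struct tb_ctl *ctl, u64 route,
						 u32 offset, u32 *value)
{
	struct tb_cfg_result res;

	res = tb_cfg_read_raw(ctl, value, route, 0, TB_CFG_SWITCH, offset, 1,
			      ctl->timeout_msec);
	if (res.err == 1)
		return -EIO;	/* Thunderbolt error, see res.tb_error */
	return res.err;
}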
/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}
static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}
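
/*
 * Illustrative sketch (not part of the driver): a read-modify-write of one
 * dword using the errno-translated wrappers above. Unlike the raw variants,
 * these return 0 or a negative errno, so they fit normal kernel error
 * handling directly. The register offset and bits passed in are made up and
 * the function name is hypothetical.
 */
static int __maybe_unused example_rmw_dword(struct tb_ctl *ctl, u64 route,
					    u32 port, u32 offset, u32 set_bits)
{
	u32 val;
	int ret;

	ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
	if (ret)
		return ret;

	val |= set_bits;

	return tb_cfg_write(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
}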
/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;

	if (res.err)
		return res.err;

	return res.response_port;
}
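
/*
 * Illustrative sketch (not part of the driver): because the return value
 * doubles as the port number, callers only need a single sign check. The
 * function name is hypothetical.
 */
static int __maybe_unused example_log_upstream_port(struct tb_ctl *ctl, u64 route)
{
	int port = tb_cfg_get_upstream_port(ctl, route);

	if (port < 0)
		return port;	/* -EIO, -ETIMEDOUT, ... */

	tb_ctl_dbg(ctl, "router %llx is reached through its port %d\n",
		   route, port);
	return 0;
}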