// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <[email protected]>
 *
 * I3C HCI v2.0 Command Descriptor Handling
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The code here will change.
 */

#include <linux/bitfield.h>
#include <linux/i3c/master.h>

#include "hci.h"
#include "cmd.h"
#include "xfer_mode_rate.h"

/*
 * Unified Data Transfer Command
 */

#define CMD_0_ATTR_U			FIELD_PREP(CMD_0_ATTR, 0x4)

#define CMD_U3_HDR_TSP_ML_CTRL(v)	FIELD_PREP(W3_MASK(107, 104), v)
#define CMD_U3_IDB4(v)			FIELD_PREP(W3_MASK(103, 96), v)
#define CMD_U3_HDR_CMD(v)		FIELD_PREP(W3_MASK(103, 96), v)
#define CMD_U2_IDB3(v)			FIELD_PREP(W2_MASK( 95, 88), v)
#define CMD_U2_HDR_BT(v)		FIELD_PREP(W2_MASK( 95, 88), v)
#define CMD_U2_IDB2(v)			FIELD_PREP(W2_MASK( 87, 80), v)
#define CMD_U2_BT_CMD2(v)		FIELD_PREP(W2_MASK( 87, 80), v)
#define CMD_U2_IDB1(v)			FIELD_PREP(W2_MASK( 79, 72), v)
#define CMD_U2_BT_CMD1(v)		FIELD_PREP(W2_MASK( 79, 72), v)
#define CMD_U2_IDB0(v)			FIELD_PREP(W2_MASK( 71, 64), v)
#define CMD_U2_BT_CMD0(v)		FIELD_PREP(W2_MASK( 71, 64), v)
#define CMD_U1_ERR_HANDLING(v)		FIELD_PREP(W1_MASK( 63, 62), v)
#define CMD_U1_ADD_FUNC(v)		FIELD_PREP(W1_MASK( 61, 56), v)
#define CMD_U1_COMBO_XFER		W1_BIT_( 55)
#define CMD_U1_DATA_LENGTH(v)		FIELD_PREP(W1_MASK( 53, 32), v)
#define CMD_U0_TOC			W0_BIT_( 31)
#define CMD_U0_ROC			W0_BIT_( 30)
#define CMD_U0_MAY_YIELD		W0_BIT_( 29)
#define CMD_U0_NACK_RCNT(v)		FIELD_PREP(W0_MASK( 28, 27), v)
#define CMD_U0_IDB_COUNT(v)		FIELD_PREP(W0_MASK( 26, 24), v)
#define CMD_U0_MODE_INDEX(v)		FIELD_PREP(W0_MASK( 22, 18), v)
#define CMD_U0_XFER_RATE(v)		FIELD_PREP(W0_MASK( 17, 15), v)
#define CMD_U0_DEV_ADDRESS(v)		FIELD_PREP(W0_MASK( 14, 8), v)
#define CMD_U0_RnW			W0_BIT_( 7)
#define CMD_U0_TID(v)			FIELD_PREP(W0_MASK( 6, 3), v)

/*
 * Address Assignment Command
 */

#define CMD_0_ATTR_A			FIELD_PREP(CMD_0_ATTR, 0x2)

#define CMD_A1_DATA_LENGTH(v)		FIELD_PREP(W1_MASK( 53, 32), v)
#define CMD_A0_TOC			W0_BIT_( 31)
#define CMD_A0_ROC			W0_BIT_( 30)
#define CMD_A0_XFER_RATE(v)		FIELD_PREP(W0_MASK( 17, 15), v)
#define CMD_A0_ASSIGN_ADDRESS(v)	FIELD_PREP(W0_MASK( 14, 8), v)
#define CMD_A0_TID(v)			FIELD_PREP(W0_MASK( 6, 3), v)
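
/*
 * Map the configured bus SCL rate to the controller's transfer rate
 * index for I3C SDR transfers (see xfer_mode_rate.h). The thresholds
 * below follow the rate table as understood from the in-flux v2.0
 * draft: 12 MHz and above selects SDR0, progressively slower buses
 * select SDR1..SDR4, and anything at or below 2 MHz falls back to
 * the FM/FM+ compatible rate.
 */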
static unsigned int get_i3c_rate_idx(struct i3c_hci *hci)
{
	struct i3c_bus *bus = i3c_master_get_bus(&hci->master);

	if (bus->scl_rate.i3c >= 12000000)
		return XFERRATE_I3C_SDR0;
	if (bus->scl_rate.i3c > 8000000)
		return XFERRATE_I3C_SDR1;
	if (bus->scl_rate.i3c > 6000000)
		return XFERRATE_I3C_SDR2;
	if (bus->scl_rate.i3c > 4000000)
		return XFERRATE_I3C_SDR3;
	if (bus->scl_rate.i3c > 2000000)
		return XFERRATE_I3C_SDR4;
	return XFERRATE_I3C_SDR_FM_FMP;
}
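
/* Same mapping for I2C transfers: 1 MHz and above is FM+, else FM. */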
static unsigned int get_i2c_rate_idx(struct i3c_hci *hci)
{
	struct i3c_bus *bus = i3c_master_get_bus(&hci->master);

	if (bus->scl_rate.i2c >= 1000000)
		return XFERRATE_I2C_FMP;
	return XFERRATE_I2C_FM;
}
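
/*
 * Build the Unified Data Transfer Command descriptor for a private
 * transfer. Short writes (up to 5 bytes) are packed directly into the
 * descriptor's immediate data bytes (IDB0..IDB4) so no separate data
 * buffer is needed; anything else becomes a regular transfer with the
 * payload length in DATA_LENGTH and the buffer left attached to the
 * xfer.
 */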
static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
					 struct hci_xfer *xfer,
					 u8 addr, unsigned int mode,
					 unsigned int rate)
{
	u8 *data = xfer->data;
	unsigned int data_len = xfer->data_len;
	bool rnw = xfer->rnw;

	xfer->cmd_tid = hci_get_tid();

	if (!rnw && data_len <= 5) {
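		/* we use an Immediate Data Transfer Command */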
		xfer->cmd_desc[0] =
			CMD_0_ATTR_U |
			CMD_U0_TID(xfer->cmd_tid) |
			CMD_U0_DEV_ADDRESS(addr) |
			CMD_U0_XFER_RATE(rate) |
			CMD_U0_MODE_INDEX(mode) |
			CMD_U0_IDB_COUNT(data_len);
		xfer->cmd_desc[1] =
			CMD_U1_DATA_LENGTH(0);
		xfer->cmd_desc[2] = 0;
		xfer->cmd_desc[3] = 0;
		switch (data_len) {
		case 5:
			xfer->cmd_desc[3] |= CMD_U3_IDB4(data[4]);
			fallthrough;
		case 4:
			xfer->cmd_desc[2] |= CMD_U2_IDB3(data[3]);
			fallthrough;
		case 3:
			xfer->cmd_desc[2] |= CMD_U2_IDB2(data[2]);
			fallthrough;
		case 2:
			xfer->cmd_desc[2] |= CMD_U2_IDB1(data[1]);
			fallthrough;
		case 1:
			xfer->cmd_desc[2] |= CMD_U2_IDB0(data[0]);
			fallthrough;
		case 0:
			break;
		}
		/* we consumed all the data with the cmd descriptor */
		xfer->data = NULL;
	} else {
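		/* we use a Regular Data Transfer Command */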
		xfer->cmd_desc[0] =
			CMD_0_ATTR_U |
			CMD_U0_TID(xfer->cmd_tid) |
			(rnw ? CMD_U0_RnW : 0) |
			CMD_U0_DEV_ADDRESS(addr) |
			CMD_U0_XFER_RATE(rate) |
			CMD_U0_MODE_INDEX(mode);
		xfer->cmd_desc[1] =
			CMD_U1_DATA_LENGTH(data_len);
		xfer->cmd_desc[2] = 0;
		xfer->cmd_desc[3] = 0;
	}
}
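
/*
 * Prepare a CCC (Common Command Code) transfer. A raw CCC aimed at a
 * specific device goes out as a plain private transfer; otherwise the
 * CCC code itself always rides in IDB0, with up to 4 payload bytes
 * packed as immediate data when the payload is small enough.
 */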
static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
			       u8 ccc_addr, u8 ccc_cmd, bool raw)
{
	unsigned int mode = XFERMODE_IDX_I3C_SDR;
	unsigned int rate = get_i3c_rate_idx(hci);
	u8 *data = xfer->data;
	unsigned int data_len = xfer->data_len;
	bool rnw = xfer->rnw;

	if (raw && ccc_addr != I3C_BROADCAST_ADDR) {
		hci_cmd_v2_prep_private_xfer(hci, xfer, ccc_addr, mode, rate);
		return 0;
	}

	xfer->cmd_tid = hci_get_tid();

	if (!rnw && data_len <= 4) {
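		/* we use an Immediate Data Transfer Command */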
		xfer->cmd_desc[0] =
			CMD_0_ATTR_U |
			CMD_U0_TID(xfer->cmd_tid) |
			CMD_U0_DEV_ADDRESS(ccc_addr) |
			CMD_U0_XFER_RATE(rate) |
			CMD_U0_MODE_INDEX(mode) |
			CMD_U0_IDB_COUNT(data_len + (!raw ? 0 : 1));
		xfer->cmd_desc[1] =
			CMD_U1_DATA_LENGTH(0);
		xfer->cmd_desc[2] =
			CMD_U2_IDB0(ccc_cmd);
		xfer->cmd_desc[3] = 0;
		switch (data_len) {
		case 4:
			xfer->cmd_desc[3] |= CMD_U3_IDB4(data[3]);
			fallthrough;
		case 3:
			xfer->cmd_desc[2] |= CMD_U2_IDB3(data[2]);
			fallthrough;
		case 2:
			xfer->cmd_desc[2] |= CMD_U2_IDB2(data[1]);
			fallthrough;
		case 1:
			xfer->cmd_desc[2] |= CMD_U2_IDB1(data[0]);
			fallthrough;
		case 0:
			break;
		}
		/* we consumed all the data with the cmd descriptor */
		xfer->data = NULL;
	} else {
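		/* we use a Regular Data Transfer Command */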
		xfer->cmd_desc[0] =
			CMD_0_ATTR_U |
			CMD_U0_TID(xfer->cmd_tid) |
			(rnw ? CMD_U0_RnW : 0) |
			CMD_U0_DEV_ADDRESS(ccc_addr) |
			CMD_U0_XFER_RATE(rate) |
			CMD_U0_MODE_INDEX(mode) |
			CMD_U0_IDB_COUNT(!raw ? 0 : 1);
		xfer->cmd_desc[1] =
			CMD_U1_DATA_LENGTH(data_len);
		xfer->cmd_desc[2] =
			CMD_U2_IDB0(ccc_cmd);
		xfer->cmd_desc[3] = 0;
	}

	return 0;
}
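
/*
 * The two private transfer hooks only differ in which address and
 * mode/rate pair they feed to hci_cmd_v2_prep_private_xfer(): the
 * device's dynamic address at an I3C SDR rate, or its static address
 * at an I2C FM/FM+ rate.
 */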
static void hci_cmd_v2_prep_i3c_xfer(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct hci_xfer *xfer)
{
	unsigned int mode = XFERMODE_IDX_I3C_SDR;
	unsigned int rate = get_i3c_rate_idx(hci);
	u8 addr = dev->info.dyn_addr;

	hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
}

static void hci_cmd_v2_prep_i2c_xfer(struct i3c_hci *hci,
				     struct i2c_dev_desc *dev,
				     struct hci_xfer *xfer)
{
	unsigned int mode = XFERMODE_IDX_I2C;
	unsigned int rate = get_i2c_rate_idx(hci);
	u8 addr = dev->addr;

	hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
}
static int hci_cmd_v2_daa(struct i3c_hci *hci)
{
	struct hci_xfer *xfer;
	int ret;
	u8 next_addr = 0;
	u32 device_id[2];
	u64 pid;
	unsigned int dcr, bcr;
	DECLARE_COMPLETION_ONSTACK(done);

	xfer = hci_alloc_xfer(2);
	if (!xfer)
		return -ENOMEM;

	xfer[0].data = &device_id;
	xfer[0].data_len = 8;
	xfer[0].rnw = true;
	xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
	xfer[1].completion = &done;

	for (;;) {
		ret = i3c_master_get_free_addr(&hci->master, next_addr);
		if (ret < 0)
			break;
		next_addr = ret;
		DBG("next_addr = 0x%02x", next_addr);
		xfer[0].cmd_tid = hci_get_tid();
		xfer[0].cmd_desc[0] =
			CMD_0_ATTR_A |
			CMD_A0_TID(xfer[0].cmd_tid) |
			CMD_A0_ROC;
		xfer[1].cmd_tid = hci_get_tid();
		xfer[1].cmd_desc[0] =
			CMD_0_ATTR_A |
			CMD_A0_TID(xfer[1].cmd_tid) |
			CMD_A0_ASSIGN_ADDRESS(next_addr) |
			CMD_A0_ROC |
			CMD_A0_TOC;
		hci->io->queue_xfer(hci, xfer, 2);
		if (!wait_for_completion_timeout(&done, HZ) &&
		    hci->io->dequeue_xfer(hci, xfer, 2)) {
			ret = -ETIME;
			break;
		}
		if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
			ret = 0; /* no more devices to be assigned */
			break;
		}
		if (RESP_STATUS(xfer[1].response) != RESP_SUCCESS) {
			ret = -EIO;
			break;
		}

		pid = FIELD_GET(W1_MASK(47, 32), device_id[1]);
		pid = (pid << 32) | device_id[0];
		bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
		dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
		DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
		    next_addr, pid, dcr, bcr);

		/*
		 * TODO: Extend the subsystem layer to allow for registering
		 * new device and provide BCR/DCR/PID at the same time.
		 */
		ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
		if (ret)
			break;
	}

	hci_free_xfer(xfer, 2);
	return ret;
}
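
/* Command descriptor operations for controllers speaking HCI v2.0 */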
const struct hci_cmd_ops mipi_i3c_hci_cmd_v2 = {
	.prep_ccc	= hci_cmd_v2_prep_ccc,
	.prep_i3c_xfer	= hci_cmd_v2_prep_i3c_xfer,
	.prep_i2c_xfer	= hci_cmd_v2_prep_i2c_xfer,
	.perform_daa	= hci_cmd_v2_daa,
};