ext_caps.c 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308
  1. // SPDX-License-Identifier: BSD-3-Clause
  2. /*
  3. * Copyright (c) 2020, MIPI Alliance, Inc.
  4. *
  5. * Author: Nicolas Pitre <[email protected]>
  6. */
  7. #include <linux/bitfield.h>
  8. #include <linux/device.h>
  9. #include <linux/errno.h>
  10. #include <linux/i3c/master.h>
  11. #include <linux/kernel.h>
  12. #include <linux/io.h>
  13. #include "hci.h"
  14. #include "ext_caps.h"
  15. #include "xfer_mode_rate.h"
  16. /* Extended Capability Header */
  17. #define CAP_HEADER_LENGTH GENMASK(23, 8)
  18. #define CAP_HEADER_ID GENMASK(7, 0)
  19. static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
  20. {
  21. hci->vendor_mipi_id = readl(base + 0x04);
  22. hci->vendor_version_id = readl(base + 0x08);
  23. hci->vendor_product_id = readl(base + 0x0c);
  24. dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
  25. dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
  26. dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);
  27. /* ought to go in a table if this grows too much */
  28. switch (hci->vendor_mipi_id) {
  29. case MIPI_VENDOR_NXP:
  30. hci->quirks |= HCI_QUIRK_RAW_CCC;
  31. DBG("raw CCC quirks set");
  32. break;
  33. }
  34. return 0;
  35. }
  36. static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
  37. {
  38. u32 master_config = readl(base + 0x04);
  39. unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
  40. static const char * const functionality[] = {
  41. "(unknown)", "master only", "target only",
  42. "primary/secondary master" };
  43. dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
  44. if (operation_mode & 0x1)
  45. return 0;
  46. dev_err(&hci->master.dev, "only master mode is currently supported\n");
  47. return -EOPNOTSUPP;
  48. }
  49. static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
  50. {
  51. u32 bus_instance = readl(base + 0x04);
  52. unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);
  53. dev_info(&hci->master.dev, "%d bus instances\n", count);
  54. return 0;
  55. }
  56. static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
  57. {
  58. u32 header = readl(base);
  59. u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
  60. unsigned int index;
  61. dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
  62. entries);
  63. base += 4; /* skip header */
  64. for (index = 0; index < entries; index++) {
  65. u32 mode_entry = readl(base);
  66. DBG("mode %d: 0x%08x", index, mode_entry);
  67. /* TODO: will be needed when I3C core does more than SDR */
  68. base += 4;
  69. }
  70. return 0;
  71. }
  72. static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
  73. {
  74. u32 header = readl(base);
  75. u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
  76. u32 rate_entry;
  77. unsigned int index, rate, rate_id, mode_id;
  78. base += 4; /* skip header */
  79. dev_info(&hci->master.dev, "available data rates:\n");
  80. for (index = 0; index < entries; index++) {
  81. rate_entry = readl(base);
  82. DBG("entry %d: 0x%08x", index, rate_entry);
  83. rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
  84. rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
  85. mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
  86. dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
  87. rate_id,
  88. mode_id == XFERRATE_MODE_I3C ? "I3C" :
  89. mode_id == XFERRATE_MODE_I2C ? "I2C" :
  90. "unknown mode",
  91. rate);
  92. base += 4;
  93. }
  94. return 0;
  95. }
  96. static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
  97. {
  98. u32 autocmd_ext_caps = readl(base + 0x04);
  99. unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
  100. u32 autocmd_ext_config = readl(base + 0x08);
  101. unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);
  102. dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
  103. count, max_count);
  104. /* remember auto-command register location for later use */
  105. hci->AUTOCMD_regs = base;
  106. return 0;
  107. }
  108. static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
  109. {
  110. dev_info(&hci->master.dev, "debug registers present\n");
  111. hci->DEBUG_regs = base;
  112. return 0;
  113. }
  114. static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
  115. {
  116. dev_info(&hci->master.dev, "scheduled commands available\n");
  117. /* hci->schedcmd_regs = base; */
  118. return 0;
  119. }
  120. static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
  121. {
  122. dev_info(&hci->master.dev, "Non-Current Master support available\n");
  123. /* hci->NCM_regs = base; */
  124. return 0;
  125. }
  126. static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
  127. {
  128. dev_info(&hci->master.dev, "CCC Response Configuration available\n");
  129. return 0;
  130. }
  131. static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
  132. {
  133. dev_info(&hci->master.dev, "Global DAT available\n");
  134. return 0;
  135. }
  136. static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
  137. {
  138. dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
  139. return 0;
  140. }
  141. static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
  142. {
  143. dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
  144. return 0;
  145. }
/*
 * Descriptor for a standard extended capability: maps a capability ID
 * to its parser and to the minimum capability length (in DWORDs) that
 * must be advertised before the parser is safe to run.
 */
struct hci_ext_caps {
	u8 id;			/* extended capability ID */
	u16 min_length;		/* minimum length in DWORDs, incl. header */
	int (*parser)(struct i3c_hci *hci, void __iomem *base);
};

/*
 * Initializer helper: min_length is derived from the highest register
 * byte offset the parser dereferences — offset/4 DWORDs plus one for
 * the capability header word itself.
 */
#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
	{ .id = (_id), .parser = (_parser), \
	  .min_length = (_highest_mandatory_reg_offset)/4 + 1 }

/* Standard extended capabilities this driver knows how to handle. */
static const struct hci_ext_caps ext_capabilities[] = {
	EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
	EXT_CAP(0x02, 0x04, hci_extcap_master_config),
	EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
	EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
	EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
	EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
	EXT_CAP(0x0c, 0x10, hci_extcap_debug),
	EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
	EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master),	/* TODO confirm size */
	EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
	EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
	EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
	EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
};
  169. static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
  170. {
  171. hci->vendor_data = (__force void *)base;
  172. dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
  173. /* reset the FPGA */
  174. writel(0xdeadbeef, base + 1*4);
  175. return 0;
  176. }
/*
 * Descriptor for a vendor-specific extended capability: like
 * hci_ext_caps but additionally keyed by the vendor MIPI ID, since
 * vendor capability IDs only have meaning for a given vendor.
 */
struct hci_ext_cap_vendor_specific {
	u32 vendor;		/* MIPI vendor ID this entry applies to */
	u8 cap;			/* vendor-specific capability ID */
	u16 min_length;		/* minimum length in DWORDs, incl. header */
	int (*parser)(struct i3c_hci *hci, void __iomem *base);
};

/*
 * Initializer helper: same min_length derivation as EXT_CAP; the
 * parser name is pasted from the vendor token (hci_extcap_vendor_*).
 */
#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
	{ .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
	  .parser = (hci_extcap_vendor_##_vendor), \
	  .min_length = (_highest_mandatory_reg_offset)/4 + 1 }

/* Vendor-specific extended capabilities this driver knows about. */
static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
	EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
};
  190. static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
  191. u32 cap_id, u32 cap_length)
  192. {
  193. const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
  194. int i;
  195. vendor_cap_entry = NULL;
  196. for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
  197. if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
  198. vendor_ext_caps[i].cap == cap_id) {
  199. vendor_cap_entry = &vendor_ext_caps[i];
  200. break;
  201. }
  202. }
  203. if (!vendor_cap_entry) {
  204. dev_notice(&hci->master.dev,
  205. "unknown ext_cap 0x%02x for vendor 0x%02x\n",
  206. cap_id, hci->vendor_mipi_id);
  207. return 0;
  208. }
  209. if (cap_length < vendor_cap_entry->min_length) {
  210. dev_err(&hci->master.dev,
  211. "ext_cap 0x%02x has size %d (expecting >= %d)\n",
  212. cap_id, cap_length, vendor_cap_entry->min_length);
  213. return -EINVAL;
  214. }
  215. return vendor_cap_entry->parser(hci, base);
  216. }
  217. int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
  218. {
  219. void __iomem *curr_cap = hci->EXTCAPS_regs;
  220. void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
  221. u32 cap_header, cap_id, cap_length;
  222. const struct hci_ext_caps *cap_entry;
  223. int i, err = 0;
  224. if (!curr_cap)
  225. return 0;
  226. for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
  227. cap_header = readl(curr_cap);
  228. cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
  229. cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
  230. DBG("id=0x%02x length=%d", cap_id, cap_length);
  231. if (!cap_length)
  232. break;
  233. if (curr_cap + cap_length * 4 >= end) {
  234. dev_err(&hci->master.dev,
  235. "ext_cap 0x%02x has size %d (too big)\n",
  236. cap_id, cap_length);
  237. err = -EINVAL;
  238. break;
  239. }
  240. if (cap_id >= 0xc0 && cap_id <= 0xcf) {
  241. err = hci_extcap_vendor_specific(hci, curr_cap,
  242. cap_id, cap_length);
  243. continue;
  244. }
  245. cap_entry = NULL;
  246. for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
  247. if (ext_capabilities[i].id == cap_id) {
  248. cap_entry = &ext_capabilities[i];
  249. break;
  250. }
  251. }
  252. if (!cap_entry) {
  253. dev_notice(&hci->master.dev,
  254. "unknown ext_cap 0x%02x\n", cap_id);
  255. } else if (cap_length < cap_entry->min_length) {
  256. dev_err(&hci->master.dev,
  257. "ext_cap 0x%02x has size %d (expecting >= %d)\n",
  258. cap_id, cap_length, cap_entry->min_length);
  259. err = -EINVAL;
  260. } else {
  261. err = cap_entry->parser(hci, curr_cap);
  262. }
  263. }
  264. return err;
  265. }