platform.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035
  1. // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
  2. /*
  3. * Copyright(c) 2015, 2016 Intel Corporation.
  4. */
  5. #include <linux/firmware.h>
  6. #include "hfi.h"
  7. #include "efivar.h"
  8. #include "eprom.h"
  9. #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
  10. static int validate_scratch_checksum(struct hfi1_devdata *dd)
  11. {
  12. u64 checksum = 0, temp_scratch = 0;
  13. int i, j, version;
  14. temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
  15. version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
  16. /* Prevent power on default of all zeroes from passing checksum */
  17. if (!version) {
  18. dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
  19. dd_dev_err(dd,
  20. "%s: Please update your BIOS to support active channels\n",
  21. __func__);
  22. return 0;
  23. }
  24. /*
  25. * ASIC scratch 0 only contains the checksum and bitmap version as
  26. * fields of interest, both of which are handled separately from the
  27. * loop below, so skip it
  28. */
  29. checksum += version;
  30. for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
  31. temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
  32. for (j = sizeof(u64); j != 0; j -= 2) {
  33. checksum += (temp_scratch & 0xFFFF);
  34. temp_scratch >>= 16;
  35. }
  36. }
  37. while (checksum >> 16)
  38. checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
  39. temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
  40. temp_scratch &= CHECKSUM_SMASK;
  41. temp_scratch >>= CHECKSUM_SHIFT;
  42. if (checksum + temp_scratch == 0xFFFF)
  43. return 1;
  44. dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
  45. return 0;
  46. }
  47. static void save_platform_config_fields(struct hfi1_devdata *dd)
  48. {
  49. struct hfi1_pportdata *ppd = dd->pport;
  50. u64 temp_scratch = 0, temp_dest = 0;
  51. temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
  52. temp_dest = temp_scratch &
  53. (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
  54. PORT0_PORT_TYPE_SMASK);
  55. ppd->port_type = temp_dest >>
  56. (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
  57. PORT0_PORT_TYPE_SHIFT);
  58. temp_dest = temp_scratch &
  59. (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
  60. PORT0_LOCAL_ATTEN_SMASK);
  61. ppd->local_atten = temp_dest >>
  62. (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
  63. PORT0_LOCAL_ATTEN_SHIFT);
  64. temp_dest = temp_scratch &
  65. (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
  66. PORT0_REMOTE_ATTEN_SMASK);
  67. ppd->remote_atten = temp_dest >>
  68. (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
  69. PORT0_REMOTE_ATTEN_SHIFT);
  70. temp_dest = temp_scratch &
  71. (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
  72. PORT0_DEFAULT_ATTEN_SMASK);
  73. ppd->default_atten = temp_dest >>
  74. (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
  75. PORT0_DEFAULT_ATTEN_SHIFT);
  76. temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
  77. ASIC_CFG_SCRATCH_2);
  78. ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
  79. ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
  80. ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
  81. ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
  82. QSFP_MAX_POWER_SHIFT;
  83. ppd->config_from_scratch = true;
  84. }
  85. void get_platform_config(struct hfi1_devdata *dd)
  86. {
  87. int ret = 0;
  88. u8 *temp_platform_config = NULL;
  89. u32 esize;
  90. const struct firmware *platform_config_file = NULL;
  91. if (is_integrated(dd)) {
  92. if (validate_scratch_checksum(dd)) {
  93. save_platform_config_fields(dd);
  94. return;
  95. }
  96. } else {
  97. ret = eprom_read_platform_config(dd,
  98. (void **)&temp_platform_config,
  99. &esize);
  100. if (!ret) {
  101. /* success */
  102. dd->platform_config.data = temp_platform_config;
  103. dd->platform_config.size = esize;
  104. return;
  105. }
  106. }
  107. dd_dev_err(dd,
  108. "%s: Failed to get platform config, falling back to sub-optimal default file\n",
  109. __func__);
  110. ret = request_firmware(&platform_config_file,
  111. DEFAULT_PLATFORM_CONFIG_NAME,
  112. &dd->pcidev->dev);
  113. if (ret) {
  114. dd_dev_err(dd,
  115. "%s: No default platform config file found\n",
  116. __func__);
  117. return;
  118. }
  119. /*
  120. * Allocate separate memory block to store data and free firmware
  121. * structure. This allows free_platform_config to treat EPROM and
  122. * fallback configs in the same manner.
  123. */
  124. dd->platform_config.data = kmemdup(platform_config_file->data,
  125. platform_config_file->size,
  126. GFP_KERNEL);
  127. dd->platform_config.size = platform_config_file->size;
  128. release_firmware(platform_config_file);
  129. }
/*
 * Release the platform configuration buffer attached to @dd.
 *
 * The buffer was produced by get_platform_config() — either read from
 * EPROM or kmemdup'd from the fallback firmware file. kfree(NULL) is a
 * no-op, so this is safe when no config was ever obtained; clearing the
 * pointer makes a repeated call harmless.
 *
 * NOTE(review): platform_config.size is left stale here — looks like
 * callers only consult .data after free; confirm before relying on .size.
 */
void free_platform_config(struct hfi1_devdata *dd)
{
	/* Release memory allocated for eprom or fallback file read. */
	kfree(dd->platform_config.data);
	dd->platform_config.data = NULL;
}
  136. void get_port_type(struct hfi1_pportdata *ppd)
  137. {
  138. int ret;
  139. u32 temp;
  140. ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  141. PORT_TABLE_PORT_TYPE, &temp,
  142. 4);
  143. if (ret) {
  144. ppd->port_type = PORT_TYPE_UNKNOWN;
  145. return;
  146. }
  147. ppd->port_type = temp;
  148. }
  149. int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
  150. {
  151. u8 tx_ctrl_byte = on ? 0x0 : 0xF;
  152. int ret = 0;
  153. ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
  154. &tx_ctrl_byte, 1);
  155. /* we expected 1, so consider 0 an error */
  156. if (ret == 0)
  157. ret = -EIO;
  158. else if (ret == 1)
  159. ret = 0;
  160. return ret;
  161. }
  162. static int qual_power(struct hfi1_pportdata *ppd)
  163. {
  164. u32 cable_power_class = 0, power_class_max = 0;
  165. u8 *cache = ppd->qsfp_info.cache;
  166. int ret = 0;
  167. ret = get_platform_config_field(
  168. ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
  169. SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
  170. if (ret)
  171. return ret;
  172. cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  173. if (cable_power_class > power_class_max)
  174. ppd->offline_disabled_reason =
  175. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
  176. if (ppd->offline_disabled_reason ==
  177. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
  178. dd_dev_err(
  179. ppd->dd,
  180. "%s: Port disabled due to system power restrictions\n",
  181. __func__);
  182. ret = -EPERM;
  183. }
  184. return ret;
  185. }
  186. static int qual_bitrate(struct hfi1_pportdata *ppd)
  187. {
  188. u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
  189. u8 *cache = ppd->qsfp_info.cache;
  190. if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
  191. cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
  192. ppd->offline_disabled_reason =
  193. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
  194. if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
  195. cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
  196. ppd->offline_disabled_reason =
  197. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
  198. if (ppd->offline_disabled_reason ==
  199. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
  200. dd_dev_err(
  201. ppd->dd,
  202. "%s: Cable failed bitrate check, disabling port\n",
  203. __func__);
  204. return -EPERM;
  205. }
  206. return 0;
  207. }
  208. static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
  209. {
  210. u8 cable_power_class = 0, power_ctrl_byte = 0;
  211. u8 *cache = ppd->qsfp_info.cache;
  212. int ret;
  213. cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  214. if (cable_power_class > QSFP_POWER_CLASS_1) {
  215. power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
  216. power_ctrl_byte |= 1;
  217. power_ctrl_byte &= ~(0x2);
  218. ret = qsfp_write(ppd, ppd->dd->hfi1_id,
  219. QSFP_PWR_CTRL_BYTE_OFFS,
  220. &power_ctrl_byte, 1);
  221. if (ret != 1)
  222. return -EIO;
  223. if (cable_power_class > QSFP_POWER_CLASS_4) {
  224. power_ctrl_byte |= (1 << 2);
  225. ret = qsfp_write(ppd, ppd->dd->hfi1_id,
  226. QSFP_PWR_CTRL_BYTE_OFFS,
  227. &power_ctrl_byte, 1);
  228. if (ret != 1)
  229. return -EIO;
  230. }
  231. /* SFF 8679 rev 1.7 LPMode Deassert time */
  232. msleep(300);
  233. }
  234. return 0;
  235. }
/*
 * Compute the RX CDR (clock/data recovery) enable bits — the low nibble
 * of the QSFP CDR control byte — for all four lanes.
 *
 * @ppd: port data (supplies the cached QSFP memory and config tables)
 * @rx_preset_index: row of the platform RX preset table to consult
 * @cdr_ctrl_byte: in/out scratch copy of the CDR control byte; only the
 *                 caller writes the final value to the module
 */
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* bail unless the module reports an RX CDR that can be bypassed */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	/* should we apply the configured RX CDR setting at all? */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}

	/* fetch the on/off value itself (single-lane flag) */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		/* rx_preset == 0 here, so this clears the whole byte... */
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}
/*
 * Compute the TX CDR enable bits — the high nibble of the QSFP CDR
 * control byte — for all four lanes. Mirror image of apply_rx_cdr().
 *
 * @ppd: port data
 * @tx_preset_index: row of the platform TX preset table to consult
 * @cdr_ctrl_byte: in/out scratch copy of the CDR control byte; the
 *                 caller performs the actual module write
 */
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* bail unless the module reports a TX CDR that can be bypassed */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	/* should we apply the configured TX CDR setting at all? */
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}

	/* fetch the on/off value itself (single-lane flag) */
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}
  321. static void apply_cdr_settings(
  322. struct hfi1_pportdata *ppd, u32 rx_preset_index,
  323. u32 tx_preset_index)
  324. {
  325. u8 *cache = ppd->qsfp_info.cache;
  326. u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
  327. apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
  328. apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
  329. qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
  330. &cdr_ctrl_byte, 1);
  331. }
  332. static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
  333. {
  334. u8 *cache = ppd->qsfp_info.cache;
  335. u8 tx_eq;
  336. if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
  337. return;
  338. /* Disable adaptive TX EQ if present */
  339. tx_eq = cache[(128 * 3) + 241];
  340. tx_eq &= 0xF0;
  341. qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
  342. }
/*
 * Program the fixed TX equalization level on all four lanes from the
 * platform TX preset table, clamped to the maximum the module reports
 * it supports.
 *
 * @ppd: port data
 * @tx_preset_index: row of the platform TX preset table to consult
 */
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	/* bail unless the module supports programmable TX EQ (bit 0x4) */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	/* should the configured TX EQ value be applied at all? */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	/*
	 * cache[(128 * 3) + 224] is upper page 03 byte 224 (max supported
	 * TX EQ in the high nibble). Note cache[608] below is the same
	 * byte — (128 * 3) + 224 == 608.
	 */
	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		/* requested EQ exceeds module capability; clamp to max */
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	/* duplicate the nibble so each control byte covers two lanes */
	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}
/*
 * Program the RX output emphasis level on all four lanes from the
 * platform RX preset table, clamped to the maximum the module reports
 * it supports. Companion to apply_tx_eq_prog().
 *
 * @ppd: port data
 * @rx_preset_index: row of the platform RX preset table to consult
 */
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	/* bail unless the module supports RX emphasis control (bit 0x2) */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;

	/* should the configured RX emphasis value be applied at all? */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	/*
	 * cache[(128 * 3) + 224] low nibble is the max supported RX
	 * emphasis; cache[608] below is the same byte (384 + 224 == 608).
	 */
	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		/* requested emphasis exceeds module capability; clamp */
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	/* duplicate the nibble so each control byte covers two lanes */
	rx_eq = rx_preset | (rx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}
  416. static void apply_eq_settings(struct hfi1_pportdata *ppd,
  417. u32 rx_preset_index, u32 tx_preset_index)
  418. {
  419. u8 *cache = ppd->qsfp_info.cache;
  420. /* no point going on w/o a page 3 */
  421. if (cache[2] & 4) {
  422. dd_dev_info(ppd->dd,
  423. "%s: Upper page 03 not present\n",
  424. __func__);
  425. return;
  426. }
  427. apply_tx_eq_auto(ppd);
  428. apply_tx_eq_prog(ppd, tx_preset_index);
  429. apply_rx_eq_emp(ppd, rx_preset_index);
  430. }
/*
 * Program the RX output amplitude on all four lanes. The requested
 * amplitude comes from the platform RX preset table; the value actually
 * written is the closest supported level advertised by the module.
 *
 * @ppd: port data
 * @rx_preset_index: row of the platform RX preset table to consult
 * @tx_preset_index: unused here (kept for signature symmetry with the
 *                   other apply_* helpers)
 */
static void apply_rx_amplitude_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	/*
	 * Module must support RX amplitude control (EQ info bit 0x1).
	 * NOTE(review): this log message mirrors the config-disabled one
	 * below even though this branch is about module capability —
	 * possibly copy/paste; confirm against the platform guide.
	 */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	/* should the configured RX amplitude be applied at all? */
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	/*
	 * cache[(128 * 3) + 225] is a bitmask of supported amplitude
	 * codes; scan for the requested one, remembering the highest
	 * supported code seen so far as a fallback.
	 */
	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	/* duplicate the nibble so each control byte covers two lanes */
	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}
  491. #define OPA_INVALID_INDEX 0xFFF
  492. static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
  493. u32 config_data, const char *message)
  494. {
  495. u8 i;
  496. int ret;
  497. for (i = 0; i < 4; i++) {
  498. ret = load_8051_config(ppd->dd, field_id, i, config_data);
  499. if (ret != HCMD_SUCCESS) {
  500. dd_dev_err(
  501. ppd->dd,
  502. "%s: %s for lane %u failed\n",
  503. message, __func__, i);
  504. }
  505. }
  506. }
  507. /*
  508. * Return a special SerDes setting for low power AOC cables. The power class
  509. * threshold and setting being used were all found by empirical testing.
  510. *
  511. * Summary of the logic:
  512. *
  513. * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
  514. * return 0xe
  515. * return 0; // leave at default
  516. */
  517. static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
  518. {
  519. u8 *cache = ppd->qsfp_info.cache;
  520. int power_class;
  521. /* QSFP only */
  522. if (ppd->port_type != PORT_TYPE_QSFP)
  523. return 0; /* leave at default */
  524. /* active optical cables only */
  525. switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
  526. case 0x0 ... 0x9: fallthrough;
  527. case 0xC: fallthrough;
  528. case 0xE:
  529. /* active AOC */
  530. power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
  531. if (power_class < QSFP_POWER_CLASS_4)
  532. return 0xe;
  533. }
  534. return 0; /* leave at default */
  535. }
/*
 * Push the computed tuning parameters to the 8051 firmware: the tuning
 * method, the channel loss (attenuation), the cable's capability flags,
 * and — for limiting active cables with a valid preset — the TX
 * pre/main/post cursor settings.
 *
 * @ppd: port data
 * @tx_preset_index: platform TX preset row, or OPA_INVALID_INDEX
 * @tuning_method: OPA_PASSIVE_TUNING / OPA_ACTIVE_TUNING / ...
 * @total_atten: combined channel attenuation to program
 * @limiting_active: nonzero for limiting active cables (affects logging)
 */
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	/* replace only the tuning-method byte, keep the rest of the word */
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		/* pack CDR-present and EQ-capability bits into one byte */
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		/*
		 * NOTE(review): this ret is overwritten by the following
		 * load_8051_config() without being checked — a failed read
		 * here silently bases the update on stale config_data.
		 */
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		/* only an error for QSFP limiting-active cables */
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}
/* Must be holding the QSFP i2c resource */
/*
 * Tune an active QSFP cable: power-qualify it, bit-rate-qualify it,
 * reset it if this is a re-tune, raise it to high power, look up the
 * TX/RX preset indices and local attenuation from the platform config,
 * and apply CDR/EQ/amplitude settings to the module.
 *
 * Outputs *ptr_tx_preset, *ptr_rx_preset (OPA_INVALID_INDEX on lookup
 * failure) and *ptr_total_atten. Returns 0 on success or a negative
 * errno; TX lanes are left disabled on early failure (re-enabled only
 * by the final set_qsfp_tx(ppd, 1)).
 */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	/* quiesce the TX lanes while reprogramming the module */
	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		ret = reset_qsfp(ppd);
		if (ret)
			return ret;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	/* pick the EQ or no-EQ TX preset based on the module's capability */
	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	/* local attenuation table keyed by negotiated speed */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	/* re-enable the TX lanes now that programming is complete */
	ret = set_qsfp_tx(ppd, 1);

	return ret;
}
/*
 * Dispatch QSFP tuning by cable technology (upper nibble of the module
 * technology byte):
 *   0xA-0xB           passive copper — compute total attenuation from
 *                     platform, cable, and remote contributions
 *   0x0-0x9, 0xC, 0xE active cables — full active tuning
 *   0xD, 0xF, other   unknown/unsupported — warn and do nothing
 *
 * Outputs the preset indices, tuning method, and total attenuation for
 * apply_tunings(). Returns 0 on success or a negative errno.
 */
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		/*
		 * Cable attenuation field chosen by negotiated speed;
		 * the 12G/7G offsets presumably name the measurement
		 * frequency, not the link speed — see SFF-8636 to confirm.
		 */
		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: fallthrough;
	case 0xC: fallthrough;
	case 0xE:
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: fallthrough;
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}
/*
 * This function communicates its success or failure via ppd->driver_link_ready
 * Thus, it depends on its association with start_link(...) which checks
 * driver_link_ready before proceeding with the link negotiation and
 * initialization process.
 */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;

		/* still refresh the QSFP cache so queries see real data */
		if (qsfp_mod_present(ppd)) {
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}

			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
		}
		return;
	}

	/* derive tuning inputs from the port type */
	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			/* module access requires the i2c chain lock */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	/* only push tunings if nothing above disabled the port */
	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	ppd->driver_link_ready = 0;
}