  1. // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
  2. /*
  3. * Copyright(c) 2015 - 2017 Intel Corporation.
  4. */
  5. #include <linux/firmware.h>
  6. #include <linux/mutex.h>
  7. #include <linux/delay.h>
  8. #include <linux/crc32.h>
  9. #include "hfi.h"
  10. #include "trace.h"
  11. /*
  12. * Make it easy to toggle the firmware file names and whether each gets
  13. * loaded by editing the following. This may be something we do during
  14. * development but not something a user would ever need to change.
  15. */
  16. #define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
  17. #define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
  18. #define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
  19. #define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
  20. #define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
  21. #define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
  22. #define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
  23. #define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
  24. #define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
  25. MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
  26. MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
  27. MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
  28. MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
  29. static uint fw_8051_load = 1;
  30. static uint fw_fabric_serdes_load = 1;
  31. static uint fw_pcie_serdes_load = 1;
  32. static uint fw_sbus_load = 1;
  33. /* Firmware file names get set in hfi1_firmware_init() based on the above */
  34. static char *fw_8051_name;
  35. static char *fw_fabric_serdes_name;
  36. static char *fw_sbus_name;
  37. static char *fw_pcie_serdes_name;
  38. #define SBUS_MAX_POLL_COUNT 100
  39. #define SBUS_COUNTER(reg, name) \
  40. (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
  41. ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
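/*
 * Illustrative use of SBUS_COUNTER, mirroring the timeout check in
 * sbus_request_slow() below: read the counter CSR once, then compare
 * two extracted fields.
 *
 *	u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
 *	if (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
 *	    SBUS_COUNTER(counts, EXECUTE))
 *		...every executed request produced valid receive data...
 */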
  42. /*
  43. * Firmware security header.
  44. */
  45. struct css_header {
  46. u32 module_type;
  47. u32 header_len;
  48. u32 header_version;
  49. u32 module_id;
  50. u32 module_vendor;
  51. u32 date; /* BCD yyyymmdd */
  52. u32 size; /* in DWORDs */
  53. u32 key_size; /* in DWORDs */
  54. u32 modulus_size; /* in DWORDs */
  55. u32 exponent_size; /* in DWORDs */
  56. u32 reserved[22];
  57. };
  58. /* expected field values */
  59. #define CSS_MODULE_TYPE 0x00000006
  60. #define CSS_HEADER_LEN 0x000000a1
  61. #define CSS_HEADER_VERSION 0x00010000
  62. #define CSS_MODULE_VENDOR 0x00008086
  63. #define KEY_SIZE 256
  64. #define MU_SIZE 8
  65. #define EXPONENT_SIZE 4
  66. /* size of platform configuration partition */
  67. #define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
  68. /* size of a platform configuration file encoded in format version 4 */
  69. #define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
  70. /* the file itself */
  71. struct firmware_file {
  72. struct css_header css_header;
  73. u8 modulus[KEY_SIZE];
  74. u8 exponent[EXPONENT_SIZE];
  75. u8 signature[KEY_SIZE];
  76. u8 firmware[];
  77. };
  78. struct augmented_firmware_file {
  79. struct css_header css_header;
  80. u8 modulus[KEY_SIZE];
  81. u8 exponent[EXPONENT_SIZE];
  82. u8 signature[KEY_SIZE];
  83. u8 r2[KEY_SIZE];
  84. u8 mu[MU_SIZE];
  85. u8 firmware[];
  86. };
  87. /* augmented file size difference */
  88. #define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
  89. sizeof(struct firmware_file))
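/*
 * How the two layouts are told apart later in obtain_one_firmware()
 * (a sketch; css->size counts DWORDs, so multiply by 4 for bytes):
 *
 *	if (css->size * 4 == fw->size)
 *		...plain struct firmware_file layout...
 *	else if (css->size * 4 + AUGMENT_SIZE == fw->size)
 *		...struct augmented_firmware_file layout (r2 and mu present)...
 */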
  90. struct firmware_details {
  91. /* Linux core piece */
  92. const struct firmware *fw;
  93. struct css_header *css_header;
  94. u8 *firmware_ptr; /* pointer to binary data */
  95. u32 firmware_len; /* length in bytes */
  96. u8 *modulus; /* pointer to the modulus */
  97. u8 *exponent; /* pointer to the exponent */
  98. u8 *signature; /* pointer to the signature */
  99. u8 *r2; /* pointer to r2 */
  100. u8 *mu; /* pointer to mu */
  101. struct augmented_firmware_file dummy_header;
  102. };
  103. /*
  104. * The mutex protects fw_state, fw_err, and all of the firmware_details
  105. * variables.
  106. */
  107. static DEFINE_MUTEX(fw_mutex);
  108. enum fw_state {
  109. FW_EMPTY,
  110. FW_TRY,
  111. FW_FINAL,
  112. FW_ERR
  113. };
  114. static enum fw_state fw_state = FW_EMPTY;
  115. static int fw_err;
  116. static struct firmware_details fw_8051;
  117. static struct firmware_details fw_fabric;
  118. static struct firmware_details fw_pcie;
  119. static struct firmware_details fw_sbus;
  120. /* flags for turn_off_spicos() */
  121. #define SPICO_SBUS 0x1
  122. #define SPICO_FABRIC 0x2
  123. #define ENABLE_SPICO_SMASK 0x1
  124. /* security block commands */
  125. #define RSA_CMD_INIT 0x1
  126. #define RSA_CMD_START 0x2
  127. /* security block status */
  128. #define RSA_STATUS_IDLE 0x0
  129. #define RSA_STATUS_ACTIVE 0x1
  130. #define RSA_STATUS_DONE 0x2
  131. #define RSA_STATUS_FAILED 0x3
  132. /* RSA engine timeout, in ms */
  133. #define RSA_ENGINE_TIMEOUT 100 /* ms */
  134. /* hardware mutex timeout, in ms */
  135. #define HM_TIMEOUT 10 /* ms */
  136. /* 8051 memory access timeout, in us */
  137. #define DC8051_ACCESS_TIMEOUT 100 /* us */
  138. /* the number of fabric SerDes on the SBus */
  139. #define NUM_FABRIC_SERDES 4
  140. /* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
  141. #define SBUS_READ_COMPLETE 0x4
  142. /* SBus fabric SerDes addresses, one set per HFI */
  143. static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
  144. { 0x01, 0x02, 0x03, 0x04 },
  145. { 0x28, 0x29, 0x2a, 0x2b }
  146. };
  147. /* SBus PCIe SerDes addresses, one set per HFI */
  148. static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
  149. { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
  150. 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
  151. { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
  152. 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
  153. };
  154. /* SBus PCIe PCS addresses, one set per HFI */
  155. const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
  156. { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
  157. 0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
  158. { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
  159. 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
  160. };
  161. /* SBus fabric SerDes broadcast addresses, one per HFI */
  162. static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
  163. static const u8 all_fabric_serdes_broadcast = 0xe1;
  164. /* SBus PCIe SerDes broadcast addresses, one per HFI */
  165. const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
  166. static const u8 all_pcie_serdes_broadcast = 0xe0;
  167. static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
  168. 0,
  169. SYSTEM_TABLE_MAX,
  170. PORT_TABLE_MAX,
  171. RX_PRESET_TABLE_MAX,
  172. TX_PRESET_TABLE_MAX,
  173. QSFP_ATTEN_TABLE_MAX,
  174. VARIABLE_SETTINGS_TABLE_MAX
  175. };
  176. /* forwards */
  177. static void dispose_one_firmware(struct firmware_details *fdet);
  178. static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
  179. struct firmware_details *fdet);
  180. static void dump_fw_version(struct hfi1_devdata *dd);
  181. /*
  182. * Read a single 64-bit value from 8051 data memory.
  183. *
  184. * Expects:
  185. * o caller to have already set up data read, no auto increment
  186. * o caller to turn off read enable when finished
  187. *
  188. * The address argument is a byte offset. Bits 0:2 in the address are
  189. * ignored - i.e. the hardware will always do aligned 8-byte reads as if
  190. * the lower bits are zero.
  191. *
  192. * Return 0 on success, -ENXIO on a read error (timeout).
  193. */
  194. static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
  195. {
  196. u64 reg;
  197. int count;
  198. /* step 1: set the address, clear enable */
  199. reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
  200. << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
  201. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
  202. /* step 2: enable */
  203. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
  204. reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);
  205. /* wait until ACCESS_COMPLETED is set */
  206. count = 0;
  207. while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
  208. & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
  209. == 0) {
  210. count++;
  211. if (count > DC8051_ACCESS_TIMEOUT) {
  212. dd_dev_err(dd, "timeout reading 8051 data\n");
  213. return -ENXIO;
  214. }
  215. ndelay(10);
  216. }
  217. /* gather the data */
  218. *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
  219. return 0;
  220. }
  221. /*
  222. * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks.
  223. * Return 0 on success, -errno on error.
  224. */
  225. int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
  226. {
  227. unsigned long flags;
  228. u32 done;
  229. int ret = 0;
  230. spin_lock_irqsave(&dd->dc8051_memlock, flags);
  231. /* data read set-up, no auto-increment */
  232. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
  233. for (done = 0; done < len; addr += 8, done += 8, result++) {
  234. ret = __read_8051_data(dd, addr, result);
  235. if (ret)
  236. break;
  237. }
  238. /* turn off read enable */
  239. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
  240. spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
  241. return ret;
  242. }
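/*
 * Example caller, with a hypothetical address and length, to illustrate
 * the 8-byte chunking contract of read_8051_data():
 *
 *	u64 buf[2];
 *	ret = read_8051_data(dd, 0, sizeof(buf), buf);	// two 8-byte reads
 */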
  243. /*
  244. * Write data or code to the 8051 code or data RAM.
  245. */
  246. static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
  247. const u8 *data, u32 len)
  248. {
  249. u64 reg;
  250. u32 offset;
  251. int aligned, count;
  252. /* check alignment */
  253. aligned = ((unsigned long)data & 0x7) == 0;
  254. /* write set-up */
  255. reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
  256. | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
  257. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);
  258. reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
  259. << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
  260. | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
  261. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
  262. /* write */
  263. for (offset = 0; offset < len; offset += 8) {
  264. int bytes = len - offset;
  265. if (bytes < 8) {
  266. reg = 0;
  267. memcpy(&reg, &data[offset], bytes);
  268. } else if (aligned) {
  269. reg = *(u64 *)&data[offset];
  270. } else {
  271. memcpy(&reg, &data[offset], 8);
  272. }
  273. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);
  274. /* wait until ACCESS_COMPLETED is set */
  275. count = 0;
  276. while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
  277. & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
  278. == 0) {
  279. count++;
  280. if (count > DC8051_ACCESS_TIMEOUT) {
  281. dd_dev_err(dd, "timeout writing 8051 data\n");
  282. return -ENXIO;
  283. }
  284. udelay(1);
  285. }
  286. }
  287. /* turn off write access, auto increment (also sets to data access) */
  288. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
  289. write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
  290. return 0;
  291. }
  292. /* return 0 if values match, non-zero and complain otherwise */
  293. static int invalid_header(struct hfi1_devdata *dd, const char *what,
  294. u32 actual, u32 expected)
  295. {
  296. if (actual == expected)
  297. return 0;
  298. dd_dev_err(dd,
  299. "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
  300. what, expected, actual);
  301. return 1;
  302. }
  303. /*
  304. * Verify that the static fields in the CSS header match.
  305. */
  306. static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
  307. {
  308. /* verify CSS header fields (most sizes are in DW, so add /4) */
  309. if (invalid_header(dd, "module_type", css->module_type,
  310. CSS_MODULE_TYPE) ||
  311. invalid_header(dd, "header_len", css->header_len,
  312. (sizeof(struct firmware_file) / 4)) ||
  313. invalid_header(dd, "header_version", css->header_version,
  314. CSS_HEADER_VERSION) ||
  315. invalid_header(dd, "module_vendor", css->module_vendor,
  316. CSS_MODULE_VENDOR) ||
  317. invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
  318. invalid_header(dd, "modulus_size", css->modulus_size,
  319. KEY_SIZE / 4) ||
  320. invalid_header(dd, "exponent_size", css->exponent_size,
  321. EXPONENT_SIZE / 4)) {
  322. return -EINVAL;
  323. }
  324. return 0;
  325. }
  326. /*
  327. * Make sure there are at least some bytes after the prefix.
  328. */
  329. static int payload_check(struct hfi1_devdata *dd, const char *name,
  330. long file_size, long prefix_size)
  331. {
  332. /* make sure we have some payload */
  333. if (prefix_size >= file_size) {
  334. dd_dev_err(dd,
  335. "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
  336. name, file_size, prefix_size);
  337. return -EINVAL;
  338. }
  339. return 0;
  340. }
  341. /*
  342. * Request the firmware from the system. Extract the pieces and fill in
  343. * fdet. If successful, the caller will need to call dispose_one_firmware().
  344. * Returns 0 on success, -ERRNO on error.
  345. */
  346. static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
  347. struct firmware_details *fdet)
  348. {
  349. struct css_header *css;
  350. int ret;
  351. memset(fdet, 0, sizeof(*fdet));
  352. ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
  353. if (ret) {
  354. dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
  355. name, ret);
  356. return ret;
  357. }
  358. /* verify the firmware */
  359. if (fdet->fw->size < sizeof(struct css_header)) {
  360. dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
  361. ret = -EINVAL;
  362. goto done;
  363. }
  364. css = (struct css_header *)fdet->fw->data;
  365. hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
  366. hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
  367. hfi1_cdbg(FIRMWARE, "CSS structure:");
  368. hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
  369. hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
  370. css->header_len, 4 * css->header_len);
  371. hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
  372. hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
  373. hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
  374. hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
  375. hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
  376. css->size, 4 * css->size);
  377. hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
  378. css->key_size, 4 * css->key_size);
  379. hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
  380. css->modulus_size, 4 * css->modulus_size);
  381. hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
  382. css->exponent_size, 4 * css->exponent_size);
  383. hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
  384. fdet->fw->size - sizeof(struct firmware_file));
  385. /*
  386. * If the file does not have a valid CSS header, fail.
  387. * Otherwise, check the CSS size field for an expected size.
  388. * The augmented file has r2 and mu inserted after the header
  389. * was generated, so there will be a known difference between
  390. * the CSS header size and the actual file size. Use this
  391. * difference to identify an augmented file.
  392. *
  393. * Note: css->size is in DWORDs, multiply by 4 to get bytes.
  394. */
  395. ret = verify_css_header(dd, css);
  396. if (ret) {
  397. dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
  398. } else if ((css->size * 4) == fdet->fw->size) {
  399. /* non-augmented firmware file */
  400. struct firmware_file *ff = (struct firmware_file *)
  401. fdet->fw->data;
  402. /* make sure there are bytes in the payload */
  403. ret = payload_check(dd, name, fdet->fw->size,
  404. sizeof(struct firmware_file));
  405. if (ret == 0) {
  406. fdet->css_header = css;
  407. fdet->modulus = ff->modulus;
  408. fdet->exponent = ff->exponent;
  409. fdet->signature = ff->signature;
  410. fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
  411. fdet->mu = fdet->dummy_header.mu; /* use dummy space */
  412. fdet->firmware_ptr = ff->firmware;
  413. fdet->firmware_len = fdet->fw->size -
  414. sizeof(struct firmware_file);
  415. /*
  416. * The header does not include r2 and mu - they would have to be
  417. * generated here. For now, fail.
  418. */
  419. dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
  420. ret = -EINVAL;
  421. }
  422. } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
  423. /* augmented firmware file */
  424. struct augmented_firmware_file *aff =
  425. (struct augmented_firmware_file *)fdet->fw->data;
  426. /* make sure there are bytes in the payload */
  427. ret = payload_check(dd, name, fdet->fw->size,
  428. sizeof(struct augmented_firmware_file));
  429. if (ret == 0) {
  430. fdet->css_header = css;
  431. fdet->modulus = aff->modulus;
  432. fdet->exponent = aff->exponent;
  433. fdet->signature = aff->signature;
  434. fdet->r2 = aff->r2;
  435. fdet->mu = aff->mu;
  436. fdet->firmware_ptr = aff->firmware;
  437. fdet->firmware_len = fdet->fw->size -
  438. sizeof(struct augmented_firmware_file);
  439. }
  440. } else {
  441. /* css->size check failed */
  442. dd_dev_err(dd,
  443. "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
  444. fdet->fw->size / 4,
  445. (fdet->fw->size - AUGMENT_SIZE) / 4,
  446. css->size);
  447. ret = -EINVAL;
  448. }
  449. done:
  450. /* if returning an error, clean up after ourselves */
  451. if (ret)
  452. dispose_one_firmware(fdet);
  453. return ret;
  454. }
  455. static void dispose_one_firmware(struct firmware_details *fdet)
  456. {
  457. release_firmware(fdet->fw);
  458. /* erase all previous information */
  459. memset(fdet, 0, sizeof(*fdet));
  460. }
  461. /*
  462. * Obtain the four firmware images from the OS. All must be obtained at once or not
  463. * at all. If called with the firmware state in FW_TRY, use alternate names.
  464. * On exit, this routine will have set the firmware state to one of FW_TRY,
  465. * FW_FINAL, or FW_ERR.
  466. *
  467. * Must be holding fw_mutex.
  468. */
  469. static void __obtain_firmware(struct hfi1_devdata *dd)
  470. {
  471. int err = 0;
  472. if (fw_state == FW_FINAL) /* nothing more to obtain */
  473. return;
  474. if (fw_state == FW_ERR) /* already in error */
  475. return;
  476. /* fw_state is FW_EMPTY or FW_TRY */
  477. retry:
  478. if (fw_state == FW_TRY) {
  479. /*
  480. * We tried the original and it failed. Move to the
  481. * alternate.
  482. */
  483. dd_dev_warn(dd, "using alternate firmware names\n");
  484. /*
  485. * Let others run. On some systems a request for missing firmware
  486. * stalls for 30 seconds; doing that twice in a row triggers a
  487. * task-blocked warning.
  488. */
  489. cond_resched();
  490. if (fw_8051_load)
  491. dispose_one_firmware(&fw_8051);
  492. if (fw_fabric_serdes_load)
  493. dispose_one_firmware(&fw_fabric);
  494. if (fw_sbus_load)
  495. dispose_one_firmware(&fw_sbus);
  496. if (fw_pcie_serdes_load)
  497. dispose_one_firmware(&fw_pcie);
  498. fw_8051_name = ALT_FW_8051_NAME_ASIC;
  499. fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
  500. fw_sbus_name = ALT_FW_SBUS_NAME;
  501. fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
  502. /*
  503. * Add a delay before obtaining and loading debug firmware.
  504. * Authorization will fail if the delay between firmware
  505. * authorization events is shorter than 50us. Use 100us to
  506. * leave a safe margin.
  507. */
  508. usleep_range(100, 120);
  509. }
  510. if (fw_sbus_load) {
  511. err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
  512. if (err)
  513. goto done;
  514. }
  515. if (fw_pcie_serdes_load) {
  516. err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
  517. if (err)
  518. goto done;
  519. }
  520. if (fw_fabric_serdes_load) {
  521. err = obtain_one_firmware(dd, fw_fabric_serdes_name,
  522. &fw_fabric);
  523. if (err)
  524. goto done;
  525. }
  526. if (fw_8051_load) {
  527. err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
  528. if (err)
  529. goto done;
  530. }
  531. done:
  532. if (err) {
  533. /* oops, had problems obtaining a firmware */
  534. if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
  535. /* retry with alternate (RTL only) */
  536. fw_state = FW_TRY;
  537. goto retry;
  538. }
  539. dd_dev_err(dd, "unable to obtain working firmware\n");
  540. fw_state = FW_ERR;
  541. fw_err = -ENOENT;
  542. } else {
  543. /* success */
  544. if (fw_state == FW_EMPTY &&
  545. dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
  546. fw_state = FW_TRY; /* may retry later */
  547. else
  548. fw_state = FW_FINAL; /* cannot try again */
  549. }
  550. }
  551. /*
  552. * Called by all HFIs when loading their firmware - i.e. device probe time.
  553. * The first one will do the actual firmware load. Use a mutex to resolve
  554. * any possible race condition.
  555. *
  556. * The call to this routine cannot be moved to driver load because the kernel
  557. * call request_firmware() requires a device which is only available after
  558. * the first device probe.
  559. */
  560. static int obtain_firmware(struct hfi1_devdata *dd)
  561. {
  562. unsigned long timeout;
  563. mutex_lock(&fw_mutex);
  564. /* 40s delay due to long delay on missing firmware on some systems */
  565. timeout = jiffies + msecs_to_jiffies(40000);
  566. while (fw_state == FW_TRY) {
  567. /*
  568. * Another device is trying the firmware. Wait until it
  569. * decides what works (or not).
  570. */
  571. if (time_after(jiffies, timeout)) {
  572. /* waited too long */
  573. dd_dev_err(dd, "Timeout waiting for firmware try");
  574. fw_state = FW_ERR;
  575. fw_err = -ETIMEDOUT;
  576. break;
  577. }
  578. mutex_unlock(&fw_mutex);
  579. msleep(20); /* arbitrary delay */
  580. mutex_lock(&fw_mutex);
  581. }
  582. /* not in FW_TRY state */
  583. /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
  584. if (fw_state == FW_EMPTY)
  585. __obtain_firmware(dd);
  586. mutex_unlock(&fw_mutex);
  587. return fw_err;
  588. }
  589. /*
  590. * Called when the driver unloads. The timing is asymmetric with its
  591. * counterpart, obtain_firmware(). If called at device remove time,
  592. * then it is conceivable that another device could probe while the
  593. * firmware is being disposed. The mutexes can be moved to do that
  594. * safely, but then the firmware would be requested from the OS multiple
  595. * times.
  596. *
  597. * No mutex is needed as the driver is unloading and there cannot be any
  598. * other callers.
  599. */
  600. void dispose_firmware(void)
  601. {
  602. dispose_one_firmware(&fw_8051);
  603. dispose_one_firmware(&fw_fabric);
  604. dispose_one_firmware(&fw_pcie);
  605. dispose_one_firmware(&fw_sbus);
  606. /* retain the error state, otherwise revert to empty */
  607. if (fw_state != FW_ERR)
  608. fw_state = FW_EMPTY;
  609. }
  610. /*
  611. * Called with the result of a firmware download.
  612. *
  613. * Return 1 to retry loading the firmware, 0 to stop.
  614. */
  615. static int retry_firmware(struct hfi1_devdata *dd, int load_result)
  616. {
  617. int retry;
  618. mutex_lock(&fw_mutex);
  619. if (load_result == 0) {
  620. /*
  621. * The load succeeded, so expect all others to do the same.
  622. * Do not retry again.
  623. */
  624. if (fw_state == FW_TRY)
  625. fw_state = FW_FINAL;
  626. retry = 0; /* do NOT retry */
  627. } else if (fw_state == FW_TRY) {
  628. /* load failed, obtain alternate firmware */
  629. __obtain_firmware(dd);
  630. retry = (fw_state == FW_FINAL);
  631. } else {
  632. /* else in FW_FINAL or FW_ERR, no retry in either case */
  633. retry = 0;
  634. }
  635. mutex_unlock(&fw_mutex);
  636. return retry;
  637. }
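/*
 * Sketch of the load/retry pattern built on retry_firmware(); the real
 * callers are the load paths further down in the driver:
 *
 *	do {
 *		ret = load_8051_firmware(dd, &fw_8051);
 *	} while (retry_firmware(dd, ret));
 */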
  638. /*
  639. * Write a block of data to a given array CSR. All calls will be in
  640. * multiples of 8 bytes.
  641. */
  642. static void write_rsa_data(struct hfi1_devdata *dd, int what,
  643. const u8 *data, int nbytes)
  644. {
  645. int qw_size = nbytes / 8;
  646. int i;
  647. if (((unsigned long)data & 0x7) == 0) {
  648. /* aligned */
  649. u64 *ptr = (u64 *)data;
  650. for (i = 0; i < qw_size; i++, ptr++)
  651. write_csr(dd, what + (8 * i), *ptr);
  652. } else {
  653. /* not aligned */
  654. for (i = 0; i < qw_size; i++, data += 8) {
  655. u64 value;
  656. memcpy(&value, data, 8);
  657. write_csr(dd, what + (8 * i), value);
  658. }
  659. }
  660. }
  661. /*
  662. * Write a block of data to a given CSR as a stream of writes. All calls will
  663. * be in multiples of 8 bytes.
  664. */
  665. static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
  666. const u8 *data, int nbytes)
  667. {
  668. u64 *ptr = (u64 *)data;
  669. int qw_size = nbytes / 8;
  670. for (; qw_size > 0; qw_size--, ptr++)
  671. write_csr(dd, what, *ptr);
  672. }
  673. /*
  674. * Download the signature and start the RSA mechanism. Wait for
  675. * RSA_ENGINE_TIMEOUT before giving up.
  676. */
  677. static int run_rsa(struct hfi1_devdata *dd, const char *who,
  678. const u8 *signature)
  679. {
  680. unsigned long timeout;
  681. u64 reg;
  682. u32 status;
  683. int ret = 0;
  684. /* write the signature */
  685. write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
  686. /* initialize RSA */
  687. write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
  688. /*
  689. * Make sure the engine is idle and insert a delay between the two
  690. * writes to MISC_CFG_RSA_CMD.
  691. */
  692. status = (read_csr(dd, MISC_CFG_FW_CTRL)
  693. & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
  694. >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
  695. if (status != RSA_STATUS_IDLE) {
  696. dd_dev_err(dd, "%s security engine not idle - giving up\n",
  697. who);
  698. return -EBUSY;
  699. }
  700. /* start RSA */
  701. write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
  702. /*
  703. * Look for the result.
  704. *
  705. * The RSA engine is hooked up to two MISC errors. The driver
  706. * masks these errors as they do not respond to the standard
  707. * error "clear down" mechanism. Look for these errors here and
  708. * clear them when possible. This routine will exit with the
  709. * errors of the current run still set.
  710. *
  711. * MISC_FW_AUTH_FAILED_ERR
  712. * Firmware authorization failed. This can be cleared by
  713. * re-initializing the RSA engine, then clearing the status bit.
  714. * Do not re-init the RSA engine immediately after a successful
  715. * run - this will reset the current authorization.
  716. *
  717. * MISC_KEY_MISMATCH_ERR
  718. * Key does not match. The only way to clear this is to load
  719. * a matching key then clear the status bit. If this error
  720. * is raised, it will persist outside of this routine until a
  721. * matching key is loaded.
  722. */
  723. timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
  724. while (1) {
  725. status = (read_csr(dd, MISC_CFG_FW_CTRL)
  726. & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
  727. >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
  728. if (status == RSA_STATUS_IDLE) {
  729. /* should not happen */
  730. dd_dev_err(dd, "%s firmware security bad idle state\n",
  731. who);
  732. ret = -EINVAL;
  733. break;
  734. } else if (status == RSA_STATUS_DONE) {
  735. /* finished successfully */
  736. break;
  737. } else if (status == RSA_STATUS_FAILED) {
  738. /* finished unsuccessfully */
  739. ret = -EINVAL;
  740. break;
  741. }
  742. /* else still active */
  743. if (time_after(jiffies, timeout)) {
  744. /*
  745. * Timed out while active. We can't reset the engine
  746. * if it is stuck active, but run through the
  747. * error code to see what error bits are set.
  748. */
  749. dd_dev_err(dd, "%s firmware security time out\n", who);
  750. ret = -ETIMEDOUT;
  751. break;
  752. }
  753. msleep(20);
  754. }
  755. /*
  756. * Arrive here on success or failure. Clear all RSA engine
  757. * errors. Errors from the current run will stick - the RSA logic is
  758. * still driving those error signals high. Errors from previous runs
  759. * will clear - the RSA logic is no longer driving them.
  760. */
  761. write_csr(dd, MISC_ERR_CLEAR,
  762. MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
  763. MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
  764. /*
  765. * All that is left are the current errors. Print warnings on
  766. * authorization failure details, if any. Firmware authorization
  767. * can be retried, so these are only warnings.
  768. */
  769. reg = read_csr(dd, MISC_ERR_STATUS);
  770. if (ret) {
  771. if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
  772. dd_dev_warn(dd, "%s firmware authorization failed\n",
  773. who);
  774. if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
  775. dd_dev_warn(dd, "%s firmware key mismatch\n", who);
  776. }
  777. return ret;
  778. }
  779. static void load_security_variables(struct hfi1_devdata *dd,
  780. struct firmware_details *fdet)
  781. {
  782. /* Security variables a. Write the modulus */
  783. write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
  784. /* Security variables b. Write the r2 */
  785. write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
  786. /* Security variables c. Write the mu */
  787. write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
  788. /* Security variables d. Write the header */
  789. write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
  790. (u8 *)fdet->css_header,
  791. sizeof(struct css_header));
  792. }
  793. /* return the 8051 firmware state */
  794. static inline u32 get_firmware_state(struct hfi1_devdata *dd)
  795. {
  796. u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
  797. return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
  798. & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
  799. }
  800. /*
  801. * Wait until the firmware is up and ready to take host requests.
  802. * Return 0 on success, -ETIMEDOUT on timeout.
  803. */
  804. int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
  805. {
  806. unsigned long timeout;
  807. /* in the simulator, the fake 8051 is always ready */
  808. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  809. return 0;
  810. timeout = msecs_to_jiffies(mstimeout) + jiffies;
  811. while (1) {
  812. if (get_firmware_state(dd) == 0xa0) /* ready */
  813. return 0;
  814. if (time_after(jiffies, timeout)) /* timed out */
  815. return -ETIMEDOUT;
  816. usleep_range(1950, 2050); /* sleep 2ms-ish */
  817. }
  818. }
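/*
 * Example wait, with a hypothetical timeout; the 8051 load below uses
 * TIMEOUT_8051_START:
 *
 *	if (wait_fm_ready(dd, 1000))
 *		...8051 never reached the ready state (0xa0)...
 */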
  819. /*
  820. * Load the 8051 firmware.
  821. */
  822. static int load_8051_firmware(struct hfi1_devdata *dd,
  823. struct firmware_details *fdet)
  824. {
  825. u64 reg;
  826. int ret;
  827. u8 ver_major;
  828. u8 ver_minor;
  829. u8 ver_patch;
  830. /*
  831. * DC Reset sequence
  832. * Load DC 8051 firmware
  833. */
  834. /*
  835. * DC reset step 1: Reset DC8051
  836. */
  837. reg = DC_DC8051_CFG_RST_M8051W_SMASK
  838. | DC_DC8051_CFG_RST_CRAM_SMASK
  839. | DC_DC8051_CFG_RST_DRAM_SMASK
  840. | DC_DC8051_CFG_RST_IRAM_SMASK
  841. | DC_DC8051_CFG_RST_SFR_SMASK;
  842. write_csr(dd, DC_DC8051_CFG_RST, reg);
  843. /*
  844. * DC reset step 2 (optional): Load 8051 data memory with link
  845. * configuration
  846. */
  847. /*
  848. * DC reset step 3: Load DC8051 firmware
  849. */
  850. /* release all but the core reset */
  851. reg = DC_DC8051_CFG_RST_M8051W_SMASK;
  852. write_csr(dd, DC_DC8051_CFG_RST, reg);
  853. /* Firmware load step 1 */
  854. load_security_variables(dd, fdet);
  855. /*
  856. * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
  857. */
  858. write_csr(dd, MISC_CFG_FW_CTRL, 0);
  859. /* Firmware load steps 3-5 */
  860. ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
  861. fdet->firmware_len);
  862. if (ret)
  863. return ret;
  864. /*
  865. * DC reset step 4. Host starts the DC8051 firmware
  866. */
  867. /*
  868. * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED
  869. */
  870. write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
  871. /* Firmware load steps 7-10 */
  872. ret = run_rsa(dd, "8051", fdet->signature);
  873. if (ret)
  874. return ret;
  875. /* clear all reset bits, releasing the 8051 */
  876. write_csr(dd, DC_DC8051_CFG_RST, 0ull);
  877. /*
  878. * DC reset step 5. Wait for firmware to be ready to accept host
  879. * requests.
  880. */
  881. ret = wait_fm_ready(dd, TIMEOUT_8051_START);
  882. if (ret) { /* timed out */
  883. dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
  884. get_firmware_state(dd));
  885. return -ETIMEDOUT;
  886. }
  887. read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
  888. dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
  889. (int)ver_major, (int)ver_minor, (int)ver_patch);
  890. dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
  891. ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
  892. if (ret != HCMD_SUCCESS) {
  893. dd_dev_err(dd,
  894. "Failed to set host interface version, return 0x%x\n",
  895. ret);
  896. return -EIO;
  897. }
  898. return 0;
  899. }
  900. /*
  901. * Write the SBus request register
  902. *
  903. * No need for masking - the arguments are sized exactly.
  904. */
  905. void sbus_request(struct hfi1_devdata *dd,
  906. u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
  907. {
  908. write_csr(dd, ASIC_CFG_SBUS_REQUEST,
  909. ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
  910. ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
  911. ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
  912. ((u64)receiver_addr <<
  913. ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
  914. }
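/*
 * Illustrative request, taken from the SBus Master download sequence
 * below: write one 32-bit firmware word to data address 0x14 of the
 * broadcast receiver (data_word is a placeholder for that word).
 *
 *	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x14, WRITE_SBUS_RECEIVER,
 *		     data_word);
 */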
  915. /*
  916. * Read a value from the SBus.
  917. *
  918. * Requires the caller to be in fast mode
  919. */
  920. static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
  921. u32 data_in)
  922. {
  923. u64 reg;
  924. int retries;
  925. int success = 0;
  926. u32 result = 0;
  927. u32 result_code = 0;
  928. sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
  929. for (retries = 0; retries < 100; retries++) {
  930. usleep_range(1000, 1200); /* arbitrary */
  931. reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
  932. result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
  933. & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
  934. if (result_code != SBUS_READ_COMPLETE)
  935. continue;
  936. success = 1;
  937. result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
  938. & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
  939. break;
  940. }
  941. if (!success) {
  942. dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
  943. result_code);
  944. }
  945. return result;
  946. }
  947. /*
  948. * Turn off the SBus and fabric serdes spicos.
  949. *
  950. * + Must be called with SBus fast mode turned on.
  951. * + Must be called after fabric serdes broadcast is set up.
  952. * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
  953. * when using MISC_CFG_FW_CTRL.
  954. */
  955. static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
  956. {
  957. /* only needed on A0 */
  958. if (!is_ax(dd))
  959. return;
  960. dd_dev_info(dd, "Turning off spicos:%s%s\n",
  961. flags & SPICO_SBUS ? " SBus" : "",
  962. flags & SPICO_FABRIC ? " fabric" : "");
  963. write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
  964. /* disable SBus spico */
  965. if (flags & SPICO_SBUS)
  966. sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
  967. WRITE_SBUS_RECEIVER, 0x00000040);
  968. /* disable the fabric serdes spicos */
  969. if (flags & SPICO_FABRIC)
  970. sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
  971. 0x07, WRITE_SBUS_RECEIVER, 0x00000000);
  972. write_csr(dd, MISC_CFG_FW_CTRL, 0);
  973. }
  974. /*
  975. * Reset all of the fabric serdes for this HFI in preparation to take the
  976. * link to Polling.
  977. *
  978. * To do a reset, we need to write to the serdes registers. Unfortunately,
  979. * the fabric serdes download to the other HFI on the ASIC will have turned
  980. * off the firmware validation on this HFI. This means we can't write to the
  981. * registers to reset the serdes. Work around this by performing a complete
  982. * re-download and validation of the fabric serdes firmware. This, as a
  983. * by-product, will reset the serdes. NOTE: the re-download requires that
  984. * the 8051 be in the Offline state. I.e. not actively trying to use the
  985. * serdes. This routine is called at the point where the link is Offline and
  986. * is getting ready to go to Polling.
  987. */
  988. void fabric_serdes_reset(struct hfi1_devdata *dd)
  989. {
  990. int ret;
  991. if (!fw_fabric_serdes_load)
  992. return;
  993. ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
  994. if (ret) {
  995. dd_dev_err(dd,
  996. "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
  997. return;
  998. }
  999. set_sbus_fast_mode(dd);
  1000. if (is_ax(dd)) {
  1001. /* A0 serdes do not work with a re-download */
  1002. u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
  1003. /* place SerDes in reset and disable SPICO */
  1004. sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
  1005. /* wait 100 refclk cycles @ 156.25MHz => 640ns */
  1006. udelay(1);
  1007. /* remove SerDes reset */
  1008. sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
  1009. /* turn SPICO enable on */
  1010. sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
  1011. } else {
  1012. turn_off_spicos(dd, SPICO_FABRIC);
  1013. /*
  1014. * No need for firmware retry - what to download has already
  1015. * been decided.
  1016. * No need to pay attention to the load return - the only
  1017. * failure is a validation failure, which has already been
  1018. * checked by the initial download.
  1019. */
  1020. (void)load_fabric_serdes_firmware(dd, &fw_fabric);
  1021. }
  1022. clear_sbus_fast_mode(dd);
  1023. release_chip_resource(dd, CR_SBUS);
  1024. }
  1025. /* Access to the SBus in this routine should probably be serialized */
  1026. int sbus_request_slow(struct hfi1_devdata *dd,
  1027. u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
  1028. {
  1029. u64 reg, count = 0;
  1030. /* make sure fast mode is clear */
  1031. clear_sbus_fast_mode(dd);
  1032. sbus_request(dd, receiver_addr, data_addr, command, data_in);
  1033. write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
  1034. ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
  1035. /* Wait for both DONE and RCV_DATA_VALID to go high */
  1036. reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
  1037. while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
  1038. (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
  1039. if (count++ >= SBUS_MAX_POLL_COUNT) {
  1040. u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
  1041. /*
  1042. * If the loop has timed out, we are OK if the DONE bit
  1043. * is set and the RCV_DATA_VALID and EXECUTE counters
  1044. * are the same. If not, we cannot proceed.
  1045. */
  1046. if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
  1047. (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
  1048. SBUS_COUNTER(counts, EXECUTE)))
  1049. break;
  1050. return -ETIMEDOUT;
  1051. }
  1052. udelay(1);
  1053. reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
  1054. }
  1055. count = 0;
  1056. write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
  1057. /* Wait for DONE to clear after EXECUTE is cleared */
  1058. reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
  1059. while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
  1060. if (count++ >= SBUS_MAX_POLL_COUNT)
  1061. return -ETIME;
  1062. udelay(1);
  1063. reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
  1064. }
  1065. return 0;
  1066. }
  1067. static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
  1068. struct firmware_details *fdet)
  1069. {
  1070. int i, err;
  1071. const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */
  1072. dd_dev_info(dd, "Downloading fabric firmware\n");
  1073. /* step 1: load security variables */
  1074. load_security_variables(dd, fdet);
  1075. /* step 2: place SerDes in reset and disable SPICO */
  1076. sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
  1077. /* wait 100 refclk cycles @ 156.25MHz => 640ns */
  1078. udelay(1);
  1079. /* step 3: remove SerDes reset */
  1080. sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
  1081. /* step 4: assert IMEM override */
  1082. sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
  1083. /* step 5: download SerDes machine code */
  1084. for (i = 0; i < fdet->firmware_len; i += 4) {
  1085. sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
  1086. *(u32 *)&fdet->firmware_ptr[i]);
  1087. }
  1088. /* step 6: IMEM override off */
  1089. sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
  1090. /* step 7: turn ECC on */
  1091. sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);
  1092. /* steps 8-11: run the RSA engine */
  1093. err = run_rsa(dd, "fabric serdes", fdet->signature);
  1094. if (err)
  1095. return err;
  1096. /* step 12: turn SPICO enable on */
  1097. sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
  1098. /* step 13: enable core hardware interrupts */
  1099. sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);
  1100. return 0;
  1101. }
  1102. static int load_sbus_firmware(struct hfi1_devdata *dd,
  1103. struct firmware_details *fdet)
  1104. {
  1105. int i, err;
  1106. const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
  1107. dd_dev_info(dd, "Downloading SBus firmware\n");
  1108. /* step 1: load security variables */
  1109. load_security_variables(dd, fdet);
  1110. /* step 2: place SPICO into reset and enable off */
  1111. sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
  1112. /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
  1113. sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
  1114. /* step 4: set starting IMEM address for burst download */
  1115. sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
  1116. /* step 5: download the SBus Master machine code */
  1117. for (i = 0; i < fdet->firmware_len; i += 4) {
  1118. sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
  1119. *(u32 *)&fdet->firmware_ptr[i]);
  1120. }
  1121. /* step 6: set IMEM_CNTL_EN off */
  1122. sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
  1123. /* step 7: turn ECC on */
  1124. sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);
  1125. /* steps 8-11: run the RSA engine */
  1126. err = run_rsa(dd, "SBus", fdet->signature);
  1127. if (err)
  1128. return err;
  1129. /* step 12: set SPICO_ENABLE on */
  1130. sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
  1131. return 0;
  1132. }
  1133. static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
  1134. struct firmware_details *fdet)
  1135. {
  1136. int i;
  1137. const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
  1138. dd_dev_info(dd, "Downloading PCIe firmware\n");
  1139. /* step 1: load security variables */
  1140. load_security_variables(dd, fdet);
  1141. /* step 2: assert single step (halts the SBus Master spico) */
  1142. sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
  1143. /* step 3: enable XDMEM access */
  1144. sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
  1145. /* step 4: load firmware into SBus Master XDMEM */
  1146. /*
  1147. * NOTE: the dmem address, write_en, and wdata are all pre-packed;
  1148. * we only need to pick up the bytes and write them.
  1149. */
  1150. for (i = 0; i < fdet->firmware_len; i += 4) {
  1151. sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
  1152. *(u32 *)&fdet->firmware_ptr[i]);
  1153. }
  1154. /* step 5: disable XDMEM access */
  1155. sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
  1156. /* step 6: allow SBus Spico to run */
  1157. sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
  1158. /*
  1159. * steps 7-11: run RSA, if it succeeds, firmware is available to
  1160. * be swapped
  1161. */
  1162. return run_rsa(dd, "PCIe serdes", fdet->signature);
  1163. }
  1164. /*
  1165. * Set the given broadcast values on the given list of devices.
  1166. */
  1167. static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
  1168. const u8 *addrs, int count)
  1169. {
  1170. while (--count >= 0) {
  1171. /*
  1172. * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
  1173. * defaults for everything else. Do not read-modify-write,
  1174. * per instruction from the manufacturer.
  1175. *
  1176. * Register 0xfd:
  1177. * bits what
  1178. * ----- ---------------------------------
  1179. * 0 IGNORE_BROADCAST (default 0)
  1180. * 11:4 BROADCAST_GROUP_1 (default 0xff)
  1181. * 23:16 BROADCAST_GROUP_2 (default 0xff)
  1182. */
  1183. sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
  1184. (u32)bg1 << 4 | (u32)bg2 << 16);
  1185. }
  1186. }
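/*
 * Sketch of a typical call; the link bring-up code elsewhere in the
 * driver does something like this to join each fabric SerDes to both
 * the all-HFI group and this HFI's group:
 *
 *	set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
 *			     fabric_serdes_broadcast[dd->hfi1_id],
 *			     fabric_serdes_addrs[dd->hfi1_id],
 *			     NUM_FABRIC_SERDES);
 */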
  1187. int acquire_hw_mutex(struct hfi1_devdata *dd)
  1188. {
  1189. unsigned long timeout;
  1190. int try = 0;
  1191. u8 mask = 1 << dd->hfi1_id;
  1192. u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
  1193. if (user == mask) {
  1194. dd_dev_info(dd,
  1195. "Hardware mutex already acquired, mutex mask %u\n",
  1196. (u32)mask);
  1197. return 0;
  1198. }
  1199. retry:
  1200. timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
  1201. while (1) {
  1202. write_csr(dd, ASIC_CFG_MUTEX, mask);
  1203. user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
  1204. if (user == mask)
  1205. return 0; /* success */
  1206. if (time_after(jiffies, timeout))
  1207. break; /* timed out */
  1208. msleep(20);
  1209. }
  1210. /* timed out */
  1211. dd_dev_err(dd,
  1212. "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
  1213. (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
  1214. if (try == 0) {
  1215. /* break mutex and retry */
  1216. write_csr(dd, ASIC_CFG_MUTEX, 0);
  1217. try++;
  1218. goto retry;
  1219. }
  1220. return -EBUSY;
  1221. }
  1222. void release_hw_mutex(struct hfi1_devdata *dd)
  1223. {
  1224. u8 mask = 1 << dd->hfi1_id;
  1225. u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
  1226. if (user != mask)
  1227. dd_dev_warn(dd,
  1228. "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
  1229. (u32)user, (u32)mask);
  1230. else
  1231. write_csr(dd, ASIC_CFG_MUTEX, 0);
  1232. }
  1233. /* return the given resource bit(s) as a mask for the given HFI */
  1234. static inline u64 resource_mask(u32 hfi1_id, u32 resource)
  1235. {
  1236. return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
  1237. }
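/*
 * Worked example: the dynamic-resource bits in ASIC_CFG_SCRATCH are
 * split per HFI, so resource_mask(0, CR_SBUS) is the CR_SBUS bit itself
 * while resource_mask(1, CR_SBUS) is the same bit shifted up by
 * CR_DYN_SHIFT.
 */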
  1238. static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
  1239. const char *func)
  1240. {
  1241. dd_dev_err(dd,
  1242. "%s: hardware mutex stuck - suggest rebooting the machine\n",
  1243. func);
  1244. }
  1245. /*
  1246. * Acquire access to a chip resource.
  1247. *
  1248. * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
  1249. */
  1250. static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
  1251. {
  1252. u64 scratch0, all_bits, my_bit;
  1253. int ret;
  1254. if (resource & CR_DYN_MASK) {
  1255. /* a dynamic resource is in use if either HFI has set the bit */
  1256. if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
  1257. (resource & (CR_I2C1 | CR_I2C2))) {
  1258. /* discrete devices must serialize across both chains */
  1259. all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
  1260. resource_mask(1, CR_I2C1 | CR_I2C2);
  1261. } else {
  1262. all_bits = resource_mask(0, resource) |
  1263. resource_mask(1, resource);
  1264. }
  1265. my_bit = resource_mask(dd->hfi1_id, resource);
  1266. } else {
  1267. /* non-dynamic resources are not split between HFIs */
  1268. all_bits = resource;
  1269. my_bit = resource;
  1270. }
  1271. /* lock against other callers within the driver wanting a resource */
  1272. mutex_lock(&dd->asic_data->asic_resource_mutex);
  1273. ret = acquire_hw_mutex(dd);
  1274. if (ret) {
  1275. fail_mutex_acquire_message(dd, __func__);
  1276. ret = -EIO;
  1277. goto done;
  1278. }
  1279. scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
  1280. if (scratch0 & all_bits) {
  1281. ret = -EBUSY;
  1282. } else {
  1283. write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
  1284. /* force write to be visible to other HFI on another OS */
  1285. (void)read_csr(dd, ASIC_CFG_SCRATCH);
  1286. }
  1287. release_hw_mutex(dd);
  1288. done:
  1289. mutex_unlock(&dd->asic_data->asic_resource_mutex);
  1290. return ret;
  1291. }
/*
 * Acquire access to a chip resource, wait up to mswait milliseconds for
 * the resource to become available.
 *
 * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
 * acquire failed.
 */
int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
{
	unsigned long timeout;
	int ret;

	timeout = jiffies + msecs_to_jiffies(mswait);
	while (1) {
		ret = __acquire_chip_resource(dd, resource);
		if (ret != -EBUSY)
			return ret;
		/* resource is busy, check our timeout */
		if (time_after_eq(jiffies, timeout))
			return -EBUSY;
		usleep_range(80, 120); /* arbitrary delay */
	}
}

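/*
 * Typical caller pattern (illustrative sketch, assuming a valid
 * struct hfi1_devdata *dd): acquire the resource with a bounded wait,
 * do the work, then release.  The SBus resource used by the firmware
 * loads below follows exactly this shape:
 *
 *	int ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
 *	if (ret)
 *		return ret;	// -EBUSY or -EIO
 *	// ... use the SBus ...
 *	release_chip_resource(dd, CR_SBUS);
 */
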
/*
 * Release access to a chip resource
 */
void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
{
	u64 scratch0, bit;

	/* only dynamic resources should ever be cleared */
	if (!(resource & CR_DYN_MASK)) {
		dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
			   resource);
		return;
	}
	bit = resource_mask(dd->hfi1_id, resource);

	/* lock against other callers within the driver wanting a resource */
	mutex_lock(&dd->asic_data->asic_resource_mutex);

	if (acquire_hw_mutex(dd)) {
		fail_mutex_acquire_message(dd, __func__);
		goto done;
	}

	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	if ((scratch0 & bit) != 0) {
		scratch0 &= ~bit;
		write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
		/* force write to be visible to other HFI on another OS */
		(void)read_csr(dd, ASIC_CFG_SCRATCH);
	} else {
		dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
			    __func__, dd->hfi1_id, resource);
	}

	release_hw_mutex(dd);

done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
}

/*
 * Return true if resource is set, false otherwise.  Print a warning
 * if not set and a function is supplied.
 */
bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
			 const char *func)
{
	u64 scratch0, bit;

	if (resource & CR_DYN_MASK)
		bit = resource_mask(dd->hfi1_id, resource);
	else
		bit = resource;

	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	if ((scratch0 & bit) == 0) {
		if (func)
			dd_dev_warn(dd,
				    "%s: id %d, resource 0x%x, not acquired!\n",
				    func, dd->hfi1_id, resource);
		return false;
	}
	return true;
}

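/*
 * Illustrative use (not part of the driver): check_chip_resource() is a
 * debugging aid for paths that assume a resource is already held, e.g.
 *
 *	if (!check_chip_resource(dd, CR_SBUS, __func__))
 *		return -EINVAL;	// warn and bail if CR_SBUS was not acquired
 *
 * Passing NULL instead of __func__ suppresses the warning.
 */
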
static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
{
	u64 scratch0;

	/* lock against other callers within the driver wanting a resource */
	mutex_lock(&dd->asic_data->asic_resource_mutex);

	if (acquire_hw_mutex(dd)) {
		fail_mutex_acquire_message(dd, func);
		goto done;
	}

	/* clear all dynamic access bits for this HFI */
	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
	write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
	/* force write to be visible to other HFI on another OS */
	(void)read_csr(dd, ASIC_CFG_SCRATCH);

	release_hw_mutex(dd);

done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
}

void init_chip_resources(struct hfi1_devdata *dd)
{
	/* clear any holds left by us */
	clear_chip_resources(dd, __func__);
}

void finish_chip_resources(struct hfi1_devdata *dd)
{
	/* clear any holds left by us */
	clear_chip_resources(dd, __func__);
}

void set_sbus_fast_mode(struct hfi1_devdata *dd)
{
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
		  ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
}

void clear_sbus_fast_mode(struct hfi1_devdata *dd)
{
	u64 reg, count = 0;

	reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
	while (SBUS_COUNTER(reg, EXECUTE) !=
	       SBUS_COUNTER(reg, RCV_DATA_VALID)) {
		if (count++ >= SBUS_MAX_POLL_COUNT)
			break;
		udelay(1);
		reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
	}
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
}

int load_firmware(struct hfi1_devdata *dd)
{
	int ret;

	if (fw_fabric_serdes_load) {
		ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
		if (ret)
			return ret;

		set_sbus_fast_mode(dd);

		set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
				     fabric_serdes_broadcast[dd->hfi1_id],
				     fabric_serdes_addrs[dd->hfi1_id],
				     NUM_FABRIC_SERDES);
		turn_off_spicos(dd, SPICO_FABRIC);
		do {
			ret = load_fabric_serdes_firmware(dd, &fw_fabric);
		} while (retry_firmware(dd, ret));

		clear_sbus_fast_mode(dd);
		release_chip_resource(dd, CR_SBUS);
		if (ret)
			return ret;
	}

	if (fw_8051_load) {
		do {
			ret = load_8051_firmware(dd, &fw_8051);
		} while (retry_firmware(dd, ret));
		if (ret)
			return ret;
	}

	dump_fw_version(dd);
	return 0;
}

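/*
 * Note on the retry idiom above (assumption: retry_firmware() returns
 * nonzero when the failed download should be attempted again, and zero
 * once the load succeeded or the retry budget is exhausted):
 *
 *	do {
 *		ret = load_fabric_serdes_firmware(dd, &fw_fabric);
 *	} while (retry_firmware(dd, ret));
 *
 * keeps re-issuing the download until retry_firmware() says to stop, and
 * leaves the final status in ret for the caller to check.
 */
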
int hfi1_firmware_init(struct hfi1_devdata *dd)
{
	/* only RTL can use these */
	if (dd->icode != ICODE_RTL_SILICON) {
		fw_fabric_serdes_load = 0;
		fw_pcie_serdes_load = 0;
		fw_sbus_load = 0;
	}

	/* no 8051 or QSFP on simulator */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		fw_8051_load = 0;

	if (!fw_8051_name) {
		if (dd->icode == ICODE_RTL_SILICON)
			fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
		else
			fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
	}
	if (!fw_fabric_serdes_name)
		fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
	if (!fw_sbus_name)
		fw_sbus_name = DEFAULT_FW_SBUS_NAME;
	if (!fw_pcie_serdes_name)
		fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;

	return obtain_firmware(dd);
}

/*
 * This function is a helper function for parse_platform_config(...) and
 * does not check for validity of the platform configuration cache
 * (because we know it is invalid as we are building up the cache).
 * As such, this should not be called from anywhere other than
 * parse_platform_config
 */
static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
{
	u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;

	if (!system_table)
		return -EINVAL;

	meta_ver_meta =
	*(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
	+ SYSTEM_TABLE_META_VERSION);

	mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
	ver_start = meta_ver_meta & mask;

	meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;

	mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
	ver_len = meta_ver_meta & mask;

	ver_start /= 8;
	meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);

	if (meta_ver < 4) {
		dd_dev_info(
			dd, "%s:Please update platform config\n", __func__);
		return -EINVAL;
	}
	return 0;
}

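/*
 * Worked example of the metadata decode above (illustrative; the real
 * field widths come from the METADATA_TABLE_FIELD_* constants): each
 * metadata word packs a bit offset in its low bits and a bit length in
 * the bits above METADATA_TABLE_FIELD_LEN_SHIFT.  If a metadata word
 * said "start = 16 bits, length = 8 bits", the code computes
 *
 *	ver_start = 16 / 8;			// byte offset 2 into the table
 *	meta_ver  = *((u8 *)system_table + 2) & ((1 << 8) - 1);
 *
 * i.e. the version is read as a byte-aligned field from the system table.
 */
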
int parse_platform_config(struct hfi1_devdata *dd)
{
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 *ptr = NULL;
	u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
	u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
	int ret = -EINVAL; /* assume failure */

	/*
	 * For integrated devices that did not fall back to the default file,
	 * the SI tuning information for active channels is acquired from the
	 * scratch register bitmap, thus there is no platform config to parse.
	 * Skip parsing in these situations.
	 */
	if (ppd->config_from_scratch)
		return 0;

	if (!dd->platform_config.data) {
		dd_dev_err(dd, "%s: Missing config file\n", __func__);
		ret = -EINVAL;
		goto bail;
	}
	ptr = (u32 *)dd->platform_config.data;

	magic_num = *ptr;
	ptr++;
	if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
		dd_dev_err(dd, "%s: Bad config file\n", __func__);
		ret = -EINVAL;
		goto bail;
	}

	/* Field is file size in DWORDs */
	file_length = (*ptr) * 4;

	/*
	 * Length can't be larger than partition size. Assume platform
	 * config format version 4 is being used. Interpret the file size
	 * field as header instead by not moving the pointer.
	 */
	if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
		dd_dev_info(dd,
			    "%s:File length out of bounds, using alternative format\n",
			    __func__);
		file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
	} else {
		ptr++;
	}

	if (file_length > dd->platform_config.size) {
		dd_dev_info(dd, "%s:File claims to be larger than read size\n",
			    __func__);
		ret = -EINVAL;
		goto bail;
	} else if (file_length < dd->platform_config.size) {
		dd_dev_info(dd,
			    "%s:File claims to be smaller than read size, continuing\n",
			    __func__);
	}
	/* exactly equal, perfection */

	/*
	 * In both cases where we proceed, using the self-reported file length
	 * is the safer option. In case of old format a predefined value is
	 * being used.
	 */
	while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
		header1 = *ptr;
		header2 = *(ptr + 1);
		if (header1 != ~header2) {
			dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
				   __func__, (ptr - (u32 *)
					      dd->platform_config.data));
			ret = -EINVAL;
			goto bail;
		}

		record_idx = *ptr &
			((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);

		table_length_dwords = (*ptr >>
				PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
		      ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);

		table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
			((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);

		/* Done with this set of headers */
		ptr += 2;

		if (record_idx) {
			/* data table */
			switch (table_type) {
			case PLATFORM_CONFIG_SYSTEM_TABLE:
				pcfgcache->config_tables[table_type].num_table =
					1;
				ret = check_meta_version(dd, ptr);
				if (ret)
					goto bail;
				break;
			case PLATFORM_CONFIG_PORT_TABLE:
				pcfgcache->config_tables[table_type].num_table =
					2;
				break;
			case PLATFORM_CONFIG_RX_PRESET_TABLE:
			case PLATFORM_CONFIG_TX_PRESET_TABLE:
			case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
				pcfgcache->config_tables[table_type].num_table =
					table_length_dwords;
				break;
			default:
				dd_dev_err(dd,
					   "%s: Unknown data table %d, offset %ld\n",
					   __func__, table_type,
					   (ptr - (u32 *)
					    dd->platform_config.data));
				ret = -EINVAL;
				goto bail; /* We don't trust this file now */
			}
			pcfgcache->config_tables[table_type].table = ptr;
		} else {
			/* metadata table */
			switch (table_type) {
			case PLATFORM_CONFIG_SYSTEM_TABLE:
			case PLATFORM_CONFIG_PORT_TABLE:
			case PLATFORM_CONFIG_RX_PRESET_TABLE:
			case PLATFORM_CONFIG_TX_PRESET_TABLE:
			case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
				break;
			default:
				dd_dev_err(dd,
					   "%s: Unknown meta table %d, offset %ld\n",
					   __func__, table_type,
					   (ptr -
					    (u32 *)dd->platform_config.data));
				ret = -EINVAL;
				goto bail; /* We don't trust this file now */
			}
			pcfgcache->config_tables[table_type].table_metadata =
				ptr;
		}

		/* Calculate and check table crc */
		crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
			       (table_length_dwords * 4));
		crc ^= ~(u32)0;

		/* Jump the table */
		ptr += table_length_dwords;
		if (crc != *ptr) {
			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
				   __func__, (ptr -
					      (u32 *)dd->platform_config.data));
			ret = -EINVAL;
			goto bail;
		}
		/* Jump the CRC DWORD */
		ptr++;
	}

	pcfgcache->cache_valid = 1;
	return 0;
bail:
	memset(pcfgcache, 0, sizeof(struct platform_config_cache));
	return ret;
}

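/*
 * On-image layout as implied by the parser above (each cell one DWORD):
 * a magic number, optionally a file-size word (treated as the first header
 * in the format-4 interpretation taken when the size looks out of bounds),
 * then repeated records of the form
 *
 *	header1 | header2 (= ~header1) | payload (table_length_dwords DWORDs) | CRC32
 *
 * header1 packs record_idx, table_length_dwords and table_type in the bit
 * fields named by the PLATFORM_CONFIG_HEADER_* constants; a record_idx of
 * zero marks a metadata table rather than a data table.
 */
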
static void get_integrated_platform_config_field(
		struct hfi1_devdata *dd,
		enum platform_config_table_type_encoding table_type,
		int field_index, u32 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset = 0;

	switch (table_type) {
	case PLATFORM_CONFIG_SYSTEM_TABLE:
		if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
			*data = ppd->max_power_class;
		else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
			*data = ppd->default_atten;
		break;
	case PLATFORM_CONFIG_PORT_TABLE:
		if (field_index == PORT_TABLE_PORT_TYPE)
			*data = ppd->port_type;
		else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
			*data = ppd->local_atten;
		else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
			*data = ppd->remote_atten;
		break;
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
		if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
			*data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
				QSFP_RX_CDR_APPLY_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
			*data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
				QSFP_RX_EMP_APPLY_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
			*data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
				QSFP_RX_AMP_APPLY_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
			*data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
				QSFP_RX_CDR_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
			*data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
				QSFP_RX_EMP_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
			*data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
				QSFP_RX_AMP_SHIFT;
		break;
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
		if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
			tx_preset = ppd->tx_preset_eq;
		else
			tx_preset = ppd->tx_preset_noeq;
		if (field_index == TX_PRESET_TABLE_PRECUR)
			*data = (tx_preset & TX_PRECUR_SMASK) >>
				TX_PRECUR_SHIFT;
		else if (field_index == TX_PRESET_TABLE_ATTN)
			*data = (tx_preset & TX_ATTN_SMASK) >>
				TX_ATTN_SHIFT;
		else if (field_index == TX_PRESET_TABLE_POSTCUR)
			*data = (tx_preset & TX_POSTCUR_SMASK) >>
				TX_POSTCUR_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
			*data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
				QSFP_TX_CDR_APPLY_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
			*data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
				QSFP_TX_EQ_APPLY_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
			*data = (tx_preset & QSFP_TX_CDR_SMASK) >>
				QSFP_TX_CDR_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
			*data = (tx_preset & QSFP_TX_EQ_SMASK) >>
				QSFP_TX_EQ_SHIFT;
		break;
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
	default:
		break;
	}
}

static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
					  int field, u32 *field_len_bits,
					  u32 *field_start_bits)
{
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	u32 *src_ptr = NULL;

	if (!pcfgcache->cache_valid)
		return -EINVAL;

	switch (table) {
	case PLATFORM_CONFIG_SYSTEM_TABLE:
	case PLATFORM_CONFIG_PORT_TABLE:
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
		if (field && field < platform_config_table_limits[table])
			src_ptr =
			pcfgcache->config_tables[table].table_metadata + field;
		break;
	default:
		dd_dev_info(dd, "%s: Unknown table\n", __func__);
		break;
	}

	if (!src_ptr)
		return -EINVAL;

	if (field_start_bits)
		*field_start_bits = *src_ptr &
		      ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);

	if (field_len_bits)
		*field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
		       & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);

	return 0;
}

/* This is the central interface to getting data out of the platform config
 * file. It depends on parse_platform_config() having populated the
 * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
 * validate the sanity of the cache.
 *
 * The non-obvious parameters:
 * @table_index: Acts as a look up key into which instance of the tables the
 * relevant field is fetched from.
 *
 * This applies to the data tables that have multiple instances. The port table
 * is an exception to this rule as each HFI only has one port and thus the
 * relevant table can be distinguished by hfi_id.
 *
 * @data: pointer to memory that will be populated with the field requested.
 * @len: length of memory pointed by @data in bytes.
 */
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len)
{
	int ret = 0, wlen = 0, seek = 0;
	u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	struct hfi1_pportdata *ppd = dd->pport;

	if (data)
		memset(data, 0, len);
	else
		return -EINVAL;

	if (ppd->config_from_scratch) {
		/*
		 * Use saved configuration from ppd for integrated platforms
		 */
		get_integrated_platform_config_field(dd, table_type,
						     field_index, data);
		return 0;
	}

	ret = get_platform_fw_field_metadata(dd, table_type, field_index,
					     &field_len_bits,
					     &field_start_bits);
	if (ret)
		return -EINVAL;

	/* Convert length to bits */
	len *= 8;

	/* Our metadata function checked cache_valid and field_index for us */
	switch (table_type) {
	case PLATFORM_CONFIG_SYSTEM_TABLE:
		src_ptr = pcfgcache->config_tables[table_type].table;

		if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
			if (len < field_len_bits)
				return -EINVAL;

			seek = field_start_bits / 8;
			wlen = field_len_bits / 8;

			src_ptr = (u32 *)((u8 *)src_ptr + seek);

			/*
			 * We expect the field to be byte aligned and whole byte
			 * lengths if we are here
			 */
			memcpy(data, src_ptr, wlen);
			return 0;
		}
		break;
	case PLATFORM_CONFIG_PORT_TABLE:
		/* Port table is 4 DWORDS */
		src_ptr = dd->hfi1_id ?
			pcfgcache->config_tables[table_type].table + 4 :
			pcfgcache->config_tables[table_type].table;
		break;
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
		src_ptr = pcfgcache->config_tables[table_type].table;

		if (table_index <
			pcfgcache->config_tables[table_type].num_table)
			src_ptr += table_index;
		else
			src_ptr = NULL;
		break;
	default:
		dd_dev_info(dd, "%s: Unknown table\n", __func__);
		break;
	}

	if (!src_ptr || len < field_len_bits)
		return -EINVAL;

	src_ptr += (field_start_bits / 32);
	*data = (*src_ptr >> (field_start_bits % 32)) &
			((1 << field_len_bits) - 1);

	return 0;
}

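/*
 * Example lookup (illustrative sketch, assuming a valid dd): fetch the
 * maximum QSFP power class from the system table.  table_index is 0
 * because the system table has a single instance:
 *
 *	u32 power_class_max;
 *
 *	if (!get_platform_config_field(dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
 *				       SYSTEM_TABLE_QSFP_POWER_CLASS_MAX,
 *				       &power_class_max,
 *				       sizeof(power_class_max)))
 *		use_power_class(power_class_max);	// hypothetical consumer
 */
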
/*
 * Download the firmware needed for the Gen3 PCIe SerDes.  An update
 * to the SBus firmware is needed before updating the PCIe firmware.
 *
 * Note: caller must be holding the SBus resource.
 */
int load_pcie_firmware(struct hfi1_devdata *dd)
{
	int ret = 0;

	/* both firmware loads below use the SBus */
	set_sbus_fast_mode(dd);

	if (fw_sbus_load) {
		turn_off_spicos(dd, SPICO_SBUS);
		do {
			ret = load_sbus_firmware(dd, &fw_sbus);
		} while (retry_firmware(dd, ret));
		if (ret)
			goto done;
	}

	if (fw_pcie_serdes_load) {
		dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
		set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
				     pcie_serdes_broadcast[dd->hfi1_id],
				     pcie_serdes_addrs[dd->hfi1_id],
				     NUM_PCIE_SERDES);
		do {
			ret = load_pcie_serdes_firmware(dd, &fw_pcie);
		} while (retry_firmware(dd, ret));
		if (ret)
			goto done;
	}

done:
	clear_sbus_fast_mode(dd);
	return ret;
}

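/*
 * Illustrative call sequence (not part of the driver): because
 * load_pcie_firmware() expects the caller to already hold the SBus
 * resource, a caller would bracket it the same way load_firmware()
 * brackets the fabric SerDes download above:
 *
 *	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
 *	if (ret)
 *		return ret;
 *	ret = load_pcie_firmware(dd);
 *	release_chip_resource(dd, CR_SBUS);
 */
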
/*
 * Read the GUID from the hardware, store it in dd.
 */
void read_guid(struct hfi1_devdata *dd)
{
	/* Take the DC out of reset to get a valid GUID value */
	write_csr(dd, CCE_DC_CTRL, 0);
	(void)read_csr(dd, CCE_DC_CTRL);

	dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
	dd_dev_info(dd, "GUID %llx",
		    (unsigned long long)dd->base_guid);
}

/* read and display firmware version info */
static void dump_fw_version(struct hfi1_devdata *dd)
{
	u32 pcie_vers[NUM_PCIE_SERDES];
	u32 fabric_vers[NUM_FABRIC_SERDES];
	u32 sbus_vers;
	int i;
	int all_same;
	int ret;
	u8 rcv_addr;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
		return;
	}

	/* set fast mode */
	set_sbus_fast_mode(dd);

	/* read version for SBus Master */
	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
	/* wait for interrupt to be processed */
	usleep_range(10000, 11000);
	sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
	dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);

	/* read version for PCIe SerDes */
	all_same = 1;
	pcie_vers[0] = 0;
	for (i = 0; i < NUM_PCIE_SERDES; i++) {
		rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
		/* wait for interrupt to be processed */
		usleep_range(10000, 11000);
		pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
		if (i > 0 && pcie_vers[0] != pcie_vers[i])
			all_same = 0;
	}

	if (all_same) {
		dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
			    pcie_vers[0]);
	} else {
		dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
		for (i = 0; i < NUM_PCIE_SERDES; i++) {
			dd_dev_info(dd,
				    "PCIe SerDes lane %d firmware version 0x%x\n",
				    i, pcie_vers[i]);
		}
	}

	/* read version for fabric SerDes */
	all_same = 1;
	fabric_vers[0] = 0;
	for (i = 0; i < NUM_FABRIC_SERDES; i++) {
		rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
		/* wait for interrupt to be processed */
		usleep_range(10000, 11000);
		fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
		if (i > 0 && fabric_vers[0] != fabric_vers[i])
			all_same = 0;
	}

	if (all_same) {
		dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
			    fabric_vers[0]);
	} else {
		dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
		for (i = 0; i < NUM_FABRIC_SERDES; i++) {
			dd_dev_info(dd,
				    "Fabric SerDes lane %d firmware version 0x%x\n",
				    i, fabric_vers[i]);
		}
	}

	clear_sbus_fast_mode(dd);
	release_chip_resource(dd, CR_SBUS);
}