iwl-drv.c

  1. // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2. /*
  3. * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  4. * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  5. * Copyright (C) 2016-2017 Intel Deutschland GmbH
  6. */
  7. #include <linux/completion.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/firmware.h>
  10. #include <linux/module.h>
  11. #include <linux/vmalloc.h>
  12. #include "iwl-drv.h"
  13. #include "iwl-csr.h"
  14. #include "iwl-debug.h"
  15. #include "iwl-trans.h"
  16. #include "iwl-op-mode.h"
  17. #include "iwl-agn-hw.h"
  18. #include "fw/img.h"
  19. #include "iwl-dbg-tlv.h"
  20. #include "iwl-config.h"
  21. #include "iwl-modparams.h"
  22. #include "fw/api/alive.h"
  23. #include "fw/api/mac.h"
  24. /******************************************************************************
  25. *
  26. * module boilerplate
  27. *
  28. ******************************************************************************/
  29. #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
  30. MODULE_DESCRIPTION(DRV_DESCRIPTION);
  31. MODULE_LICENSE("GPL");
  32. #ifdef CONFIG_IWLWIFI_DEBUGFS
  33. static struct dentry *iwl_dbgfs_root;
  34. #endif
  35. /**
  36. * struct iwl_drv - drv common data
  37. * @list: list of drv structures using this opmode
  38. * @fw: the iwl_fw structure
  39. * @op_mode: the running op_mode
  40. * @trans: transport layer
  41. * @dev: for debug prints only
  42. * @fw_index: firmware revision to try loading
  43. * @firmware_name: composite filename of ucode file to load
  44. * @request_firmware_complete: the firmware has been obtained from user space
  45. * @dbgfs_drv: debugfs root directory entry
  46. * @dbgfs_trans: debugfs transport directory entry
  47. * @dbgfs_op_mode: debugfs op_mode directory entry
  48. */
  49. struct iwl_drv {
  50. struct list_head list;
  51. struct iwl_fw fw;
  52. struct iwl_op_mode *op_mode;
  53. struct iwl_trans *trans;
  54. struct device *dev;
  55. int fw_index; /* firmware we're trying to load */
  56. char firmware_name[64]; /* name of firmware file to load */
  57. struct completion request_firmware_complete;
  58. #ifdef CONFIG_IWLWIFI_DEBUGFS
  59. struct dentry *dbgfs_drv;
  60. struct dentry *dbgfs_trans;
  61. struct dentry *dbgfs_op_mode;
  62. #endif
  63. };
  64. enum {
  65. DVM_OP_MODE,
  66. MVM_OP_MODE,
  67. };
  68. /* Protects the table contents, i.e. the ops pointer & drv list */
  69. static DEFINE_MUTEX(iwlwifi_opmode_table_mtx);
  70. static struct iwlwifi_opmode_table {
  71. const char *name; /* name: iwldvm, iwlmvm, etc */
  72. const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
  73. struct list_head drv; /* list of devices using this op_mode */
  74. } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
  75. [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
  76. [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
  77. };
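/*
 * A device ends up on exactly one of these lists: iwl_req_fw_callback()
 * picks DVM_OP_MODE or MVM_OP_MODE based on the parsed firmware type,
 * adds the drv to op->drv, and only starts the op_mode once op->ops is
 * non-NULL (the ops are set when the corresponding op_mode initializes,
 * per the note above).
 */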
  78. #define IWL_DEFAULT_SCAN_CHANNELS 40
  79. /*
  80. * struct fw_sec: Just for the image parsing process.
  81. * For the actual fw storage we use struct fw_desc.
  82. */
  83. struct fw_sec {
  84. const void *data; /* the sec data */
  85. size_t size; /* section size */
  86. u32 offset; /* offset of writing in the device */
  87. };
  88. static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
  89. {
  90. vfree(desc->data);
  91. desc->data = NULL;
  92. desc->len = 0;
  93. }
  94. static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
  95. {
  96. int i;
  97. for (i = 0; i < img->num_sec; i++)
  98. iwl_free_fw_desc(drv, &img->sec[i]);
  99. kfree(img->sec);
  100. }
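/*
 * iwl_dealloc_ucode - free everything iwl_req_fw_callback() may have
 * allocated: the debug TLV copies, the IML, the command versions, the
 * PHY integration string and all firmware image sections. drv->fw is
 * then zeroed so an aborted or repeated load starts from a clean state.
 */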
  101. static void iwl_dealloc_ucode(struct iwl_drv *drv)
  102. {
  103. int i;
  104. kfree(drv->fw.dbg.dest_tlv);
  105. for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
  106. kfree(drv->fw.dbg.conf_tlv[i]);
  107. for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
  108. kfree(drv->fw.dbg.trigger_tlv[i]);
  109. kfree(drv->fw.dbg.mem_tlv);
  110. kfree(drv->fw.iml);
  111. kfree(drv->fw.ucode_capa.cmd_versions);
  112. kfree(drv->fw.phy_integration_ver);
  113. for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
  114. iwl_free_fw_img(drv, drv->fw.img + i);
  115. /* clear the data for the aborted load case */
  116. memset(&drv->fw, 0, sizeof(drv->fw));
  117. }
  118. static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
  119. struct fw_sec *sec)
  120. {
  121. void *data;
  122. desc->data = NULL;
  123. if (!sec || !sec->size)
  124. return -EINVAL;
  125. data = vmalloc(sec->size);
  126. if (!data)
  127. return -ENOMEM;
  128. desc->len = sec->size;
  129. desc->offset = sec->offset;
  130. memcpy(data, sec->data, desc->len);
  131. desc->data = data;
  132. return 0;
  133. }
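/*
 * iwl_request_firmware - build the firmware file name and request it
 * asynchronously. The name has the form "<fw_name_pre><api>.ucode"; on
 * the first call <api> is cfg->ucode_api_max, and every later call with
 * first == false decrements drv->fw_index, until cfg->ucode_api_min is
 * passed and the load fails with -ENOENT. Purely as an illustration
 * (hypothetical name), the result might look like "iwlwifi-1234-56.ucode",
 * with the prefix coming from the device cfg.
 */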
  134. static void iwl_req_fw_callback(const struct firmware *ucode_raw,
  135. void *context);
  136. static int iwl_request_firmware(struct iwl_drv *drv, bool first)
  137. {
  138. const struct iwl_cfg *cfg = drv->trans->cfg;
  139. char tag[8];
  140. if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
  141. (drv->trans->hw_rev_step != SILICON_B_STEP &&
  142. drv->trans->hw_rev_step != SILICON_C_STEP)) {
  143. IWL_ERR(drv,
  144. "Only HW steps B and C are currently supported (0x%0x)\n",
  145. drv->trans->hw_rev);
  146. return -EINVAL;
  147. }
  148. if (first) {
  149. drv->fw_index = cfg->ucode_api_max;
  150. sprintf(tag, "%d", drv->fw_index);
  151. } else {
  152. drv->fw_index--;
  153. sprintf(tag, "%d", drv->fw_index);
  154. }
  155. if (drv->fw_index < cfg->ucode_api_min) {
  156. IWL_ERR(drv, "no suitable firmware found!\n");
  157. if (cfg->ucode_api_min == cfg->ucode_api_max) {
  158. IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre,
  159. cfg->ucode_api_max);
  160. } else {
  161. IWL_ERR(drv, "minimum version required: %s%d\n",
  162. cfg->fw_name_pre, cfg->ucode_api_min);
  163. IWL_ERR(drv, "maximum version supported: %s%d\n",
  164. cfg->fw_name_pre, cfg->ucode_api_max);
  165. }
  166. IWL_ERR(drv,
  167. "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
  168. return -ENOENT;
  169. }
  170. snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
  171. cfg->fw_name_pre, tag);
  172. IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
  173. drv->firmware_name);
  174. return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
  175. drv->trans->dev,
  176. GFP_KERNEL, drv, iwl_req_fw_callback);
  177. }
  178. struct fw_img_parsing {
  179. struct fw_sec *sec;
  180. int sec_counter;
  181. };
  182. /*
  183. * struct fw_sec_parsing: to extract a fw section and its offset from a TLV
  184. */
  185. struct fw_sec_parsing {
  186. __le32 offset;
  187. const u8 data[];
  188. } __packed;
  189. /**
  190. * struct iwl_tlv_calib_data - parse the default calib data from TLV
  191. *
  192. * @ucode_type: the uCode to which the following default calib relates.
  193. * @calib: default calibrations.
  194. */
  195. struct iwl_tlv_calib_data {
  196. __le32 ucode_type;
  197. struct iwl_tlv_calib_ctrl calib;
  198. } __packed;
  199. struct iwl_firmware_pieces {
  200. struct fw_img_parsing img[IWL_UCODE_TYPE_MAX];
  201. u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
  202. u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
  203. /* FW debug data parsed for driver usage */
  204. bool dbg_dest_tlv_init;
  205. const u8 *dbg_dest_ver;
  206. union {
  207. const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
  208. const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
  209. };
  210. const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
  211. size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
  212. const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
  213. size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
  214. struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
  215. size_t n_mem_tlv;
  216. };
  217. /*
  218. * These functions are just to extract uCode section data from the pieces
  219. * structure.
  220. */
  221. static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
  222. enum iwl_ucode_type type,
  223. int sec)
  224. {
  225. return &pieces->img[type].sec[sec];
  226. }
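/*
 * alloc_sec_data() grows img->sec with krealloc() so that index 'sec' is
 * valid before the set_sec_data/set_sec_size/set_sec_offset helpers below
 * store into it. These helpers serve the legacy v1/v2 header parser and
 * the old-style INST/DATA/WOWLAN TLVs; the newer SEC_* TLVs are stored
 * through iwl_store_ucode_sec() instead.
 */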
  227. static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
  228. enum iwl_ucode_type type,
  229. int sec)
  230. {
  231. struct fw_img_parsing *img = &pieces->img[type];
  232. struct fw_sec *sec_memory;
  233. int size = sec + 1;
  234. size_t alloc_size = sizeof(*img->sec) * size;
  235. if (img->sec && img->sec_counter >= size)
  236. return;
  237. sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL);
  238. if (!sec_memory)
  239. return;
  240. img->sec = sec_memory;
  241. img->sec_counter = size;
  242. }
  243. static void set_sec_data(struct iwl_firmware_pieces *pieces,
  244. enum iwl_ucode_type type,
  245. int sec,
  246. const void *data)
  247. {
  248. alloc_sec_data(pieces, type, sec);
  249. pieces->img[type].sec[sec].data = data;
  250. }
  251. static void set_sec_size(struct iwl_firmware_pieces *pieces,
  252. enum iwl_ucode_type type,
  253. int sec,
  254. size_t size)
  255. {
  256. alloc_sec_data(pieces, type, sec);
  257. pieces->img[type].sec[sec].size = size;
  258. }
  259. static size_t get_sec_size(struct iwl_firmware_pieces *pieces,
  260. enum iwl_ucode_type type,
  261. int sec)
  262. {
  263. return pieces->img[type].sec[sec].size;
  264. }
  265. static void set_sec_offset(struct iwl_firmware_pieces *pieces,
  266. enum iwl_ucode_type type,
  267. int sec,
  268. u32 offset)
  269. {
  270. alloc_sec_data(pieces, type, sec);
  271. pieces->img[type].sec[sec].offset = offset;
  272. }
  273. /*
  274. * Gets uCode section from tlv.
  275. */
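/*
 * The payload of a SEC_* TLV is a struct fw_sec_parsing: a __le32 load
 * offset followed by the raw section bytes, so the stored section size is
 * the TLV length minus the 4-byte offset field. The data pointer still
 * points into the firmware blob here; it is only copied out later by
 * iwl_alloc_fw_desc().
 */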
  276. static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
  277. const void *data, enum iwl_ucode_type type,
  278. int size)
  279. {
  280. struct fw_img_parsing *img;
  281. struct fw_sec *sec;
  282. const struct fw_sec_parsing *sec_parse;
  283. size_t alloc_size;
  284. if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
  285. return -1;
  286. sec_parse = (const struct fw_sec_parsing *)data;
  287. img = &pieces->img[type];
  288. alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
  289. sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
  290. if (!sec)
  291. return -ENOMEM;
  292. img->sec = sec;
  293. sec = &img->sec[img->sec_counter];
  294. sec->offset = le32_to_cpu(sec_parse->offset);
  295. sec->data = sec_parse->data;
  296. sec->size = size - sizeof(sec_parse->offset);
  297. ++img->sec_counter;
  298. return 0;
  299. }
  300. static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
  301. {
  302. const struct iwl_tlv_calib_data *def_calib =
  303. (const struct iwl_tlv_calib_data *)data;
  304. u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
  305. if (ucode_type >= IWL_UCODE_TYPE_MAX) {
  306. IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
  307. ucode_type);
  308. return -EINVAL;
  309. }
  310. drv->fw.default_calib[ucode_type].flow_trigger =
  311. def_calib->calib.flow_trigger;
  312. drv->fw.default_calib[ucode_type].event_trigger =
  313. def_calib->calib.event_trigger;
  314. return 0;
  315. }
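/*
 * API and capability TLVs each carry a 32-bit word index plus a 32-bit
 * flags word; bit i of word n becomes bit (32 * n + i) in capa->_api or
 * capa->_capa, so the firmware can extend the bitmaps without changing
 * the TLV format.
 */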
  316. static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
  317. struct iwl_ucode_capabilities *capa)
  318. {
  319. const struct iwl_ucode_api *ucode_api = (const void *)data;
  320. u32 api_index = le32_to_cpu(ucode_api->api_index);
  321. u32 api_flags = le32_to_cpu(ucode_api->api_flags);
  322. int i;
  323. if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
  324. IWL_WARN(drv,
  325. "api flags index %d larger than supported by driver\n",
  326. api_index);
  327. return;
  328. }
  329. for (i = 0; i < 32; i++) {
  330. if (api_flags & BIT(i))
  331. __set_bit(i + 32 * api_index, capa->_api);
  332. }
  333. }
  334. static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
  335. struct iwl_ucode_capabilities *capa)
  336. {
  337. const struct iwl_ucode_capa *ucode_capa = (const void *)data;
  338. u32 api_index = le32_to_cpu(ucode_capa->api_index);
  339. u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
  340. int i;
  341. if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
  342. IWL_WARN(drv,
  343. "capa flags index %d larger than supported by driver\n",
  344. api_index);
  345. return;
  346. }
  347. for (i = 0; i < 32; i++) {
  348. if (api_flags & BIT(i))
  349. __set_bit(i + 32 * api_index, capa->_capa);
  350. }
  351. }
  352. static const char *iwl_reduced_fw_name(struct iwl_drv *drv)
  353. {
  354. const char *name = drv->firmware_name;
  355. if (strncmp(name, "iwlwifi-", 8) == 0)
  356. name += 8;
  357. return name;
  358. }
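/*
 * Legacy (pre-TLV) firmware: the API version embedded in ucode->ver
 * selects the header layout - 24 bytes for API versions 0-2 (v1), and
 * 28 bytes otherwise (v2, which adds a build number). The four sections
 * (runtime inst/data, init inst/data) follow the header back to back,
 * and the file size must match the sum of the advertised sizes exactly.
 */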
  359. static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
  360. const struct firmware *ucode_raw,
  361. struct iwl_firmware_pieces *pieces)
  362. {
  363. const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data;
  364. u32 api_ver, hdr_size, build;
  365. char buildstr[25];
  366. const u8 *src;
  367. drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
  368. api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
  369. switch (api_ver) {
  370. default:
  371. hdr_size = 28;
  372. if (ucode_raw->size < hdr_size) {
  373. IWL_ERR(drv, "File size too small!\n");
  374. return -EINVAL;
  375. }
  376. build = le32_to_cpu(ucode->u.v2.build);
  377. set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
  378. le32_to_cpu(ucode->u.v2.inst_size));
  379. set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
  380. le32_to_cpu(ucode->u.v2.data_size));
  381. set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
  382. le32_to_cpu(ucode->u.v2.init_size));
  383. set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
  384. le32_to_cpu(ucode->u.v2.init_data_size));
  385. src = ucode->u.v2.data;
  386. break;
  387. case 0:
  388. case 1:
  389. case 2:
  390. hdr_size = 24;
  391. if (ucode_raw->size < hdr_size) {
  392. IWL_ERR(drv, "File size too small!\n");
  393. return -EINVAL;
  394. }
  395. build = 0;
  396. set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
  397. le32_to_cpu(ucode->u.v1.inst_size));
  398. set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
  399. le32_to_cpu(ucode->u.v1.data_size));
  400. set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
  401. le32_to_cpu(ucode->u.v1.init_size));
  402. set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
  403. le32_to_cpu(ucode->u.v1.init_data_size));
  404. src = ucode->u.v1.data;
  405. break;
  406. }
  407. if (build)
  408. sprintf(buildstr, " build %u", build);
  409. else
  410. buildstr[0] = '\0';
  411. snprintf(drv->fw.fw_version,
  412. sizeof(drv->fw.fw_version),
  413. "%u.%u.%u.%u%s %s",
  414. IWL_UCODE_MAJOR(drv->fw.ucode_ver),
  415. IWL_UCODE_MINOR(drv->fw.ucode_ver),
  416. IWL_UCODE_API(drv->fw.ucode_ver),
  417. IWL_UCODE_SERIAL(drv->fw.ucode_ver),
  418. buildstr, iwl_reduced_fw_name(drv));
  419. /* Verify size of file vs. image size info in file's header */
  420. if (ucode_raw->size != hdr_size +
  421. get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
  422. get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
  423. get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
  424. get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {
  425. IWL_ERR(drv,
  426. "uCode file size %d does not match expected size\n",
  427. (int)ucode_raw->size);
  428. return -EINVAL;
  429. }
  430. set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
  431. src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
  432. set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
  433. IWLAGN_RTC_INST_LOWER_BOUND);
  434. set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
  435. src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
  436. set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
  437. IWLAGN_RTC_DATA_LOWER_BOUND);
  438. set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
  439. src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
  440. set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
  441. IWLAGN_RTC_INST_LOWER_BOUND);
  442. set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
  443. src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
  444. set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
  445. IWLAGN_RTC_DATA_LOWER_BOUND);
  446. return 0;
  447. }
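/*
 * iwl_drv_set_dump_exclude - record up to two memory ranges per image
 * (regular and WoWLAN) that must be left out of firmware dumps. Which
 * array an IWL_UCODE_TLV_D3_KEK_KCK_ADDR TLV lands in depends on whether
 * the firmware advertises a consolidated D3/D0 image.
 */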
  448. static void iwl_drv_set_dump_exclude(struct iwl_drv *drv,
  449. enum iwl_ucode_tlv_type tlv_type,
  450. const void *tlv_data, u32 tlv_len)
  451. {
  452. const struct iwl_fw_dump_exclude *fw = tlv_data;
  453. struct iwl_dump_exclude *excl;
  454. if (tlv_len < sizeof(*fw))
  455. return;
  456. if (tlv_type == IWL_UCODE_TLV_SEC_TABLE_ADDR) {
  457. excl = &drv->fw.dump_excl[0];
  458. /* second time we find this, it's for WoWLAN */
  459. if (excl->addr)
  460. excl = &drv->fw.dump_excl_wowlan[0];
  461. } else if (fw_has_capa(&drv->fw.ucode_capa,
  462. IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG)) {
  463. /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is regular image */
  464. excl = &drv->fw.dump_excl[0];
  465. } else {
  466. /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is WoWLAN image */
  467. excl = &drv->fw.dump_excl_wowlan[0];
  468. }
  469. if (excl->addr)
  470. excl++;
  471. if (excl->addr) {
  472. IWL_DEBUG_FW_INFO(drv, "found too many excludes in fw file\n");
  473. return;
  474. }
  475. excl->addr = le32_to_cpu(fw->addr) & ~FW_ADDR_CACHE_CONTROL;
  476. excl->size = le32_to_cpu(fw->size);
  477. }
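/*
 * iwl_parse_dbg_tlv_assert_tables - pre-parse an ini-style region TLV:
 * if it describes a "special device memory" region for one of the
 * UMAC/LMAC/TCM/RCM error tables, store its address (base + offset, with
 * the cache-control bits masked off) in trans->dbg and flag it in
 * error_event_table_tlv_status.
 */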
  478. static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv,
  479. const struct iwl_ucode_tlv *tlv)
  480. {
  481. const struct iwl_fw_ini_region_tlv *region;
  482. u32 length = le32_to_cpu(tlv->length);
  483. u32 addr;
  484. if (length < offsetof(typeof(*region), special_mem) +
  485. sizeof(region->special_mem))
  486. return;
  487. region = (const void *)tlv->data;
  488. addr = le32_to_cpu(region->special_mem.base_addr);
  489. addr += le32_to_cpu(region->special_mem.offset);
  490. addr &= ~FW_ADDR_CACHE_CONTROL;
  491. if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY)
  492. return;
  493. switch (region->sub_type) {
  494. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE:
  495. drv->trans->dbg.umac_error_event_table = addr;
  496. drv->trans->dbg.error_event_table_tlv_status |=
  497. IWL_ERROR_EVENT_TABLE_UMAC;
  498. break;
  499. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE:
  500. drv->trans->dbg.lmac_error_event_table[0] = addr;
  501. drv->trans->dbg.error_event_table_tlv_status |=
  502. IWL_ERROR_EVENT_TABLE_LMAC1;
  503. break;
  504. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE:
  505. drv->trans->dbg.lmac_error_event_table[1] = addr;
  506. drv->trans->dbg.error_event_table_tlv_status |=
  507. IWL_ERROR_EVENT_TABLE_LMAC2;
  508. break;
  509. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE:
  510. drv->trans->dbg.tcm_error_event_table[0] = addr;
  511. drv->trans->dbg.error_event_table_tlv_status |=
  512. IWL_ERROR_EVENT_TABLE_TCM1;
  513. break;
  514. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE:
  515. drv->trans->dbg.tcm_error_event_table[1] = addr;
  516. drv->trans->dbg.error_event_table_tlv_status |=
  517. IWL_ERROR_EVENT_TABLE_TCM2;
  518. break;
  519. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE:
  520. drv->trans->dbg.rcm_error_event_table[0] = addr;
  521. drv->trans->dbg.error_event_table_tlv_status |=
  522. IWL_ERROR_EVENT_TABLE_RCM1;
  523. break;
  524. case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE:
  525. drv->trans->dbg.rcm_error_event_table[1] = addr;
  526. drv->trans->dbg.error_event_table_tlv_status |=
  527. IWL_ERROR_EVENT_TABLE_RCM2;
  528. break;
  529. default:
  530. break;
  531. }
  532. }
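/*
 * TLV firmware layout handled by iwl_parse_tlv_firmware():
 *
 *   struct iwl_tlv_ucode_header  (with magic, ver, build, human_readable)
 *   followed by repeated records: __le32 type | __le32 length | data,
 *   each padded to a 4-byte boundary.
 *
 * Unknown TLV types are only logged and skipped; size mismatches for
 * known types jump to invalid_tlv_len and fail the whole parse.
 */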
  533. static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
  534. const struct firmware *ucode_raw,
  535. struct iwl_firmware_pieces *pieces,
  536. struct iwl_ucode_capabilities *capa,
  537. bool *usniffer_images)
  538. {
  539. const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data;
  540. const struct iwl_ucode_tlv *tlv;
  541. size_t len = ucode_raw->size;
  542. const u8 *data;
  543. u32 tlv_len;
  544. u32 usniffer_img;
  545. enum iwl_ucode_tlv_type tlv_type;
  546. const u8 *tlv_data;
  547. char buildstr[25];
  548. u32 build, paging_mem_size;
  549. int num_of_cpus;
  550. bool usniffer_req = false;
  551. if (len < sizeof(*ucode)) {
  552. IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
  553. return -EINVAL;
  554. }
  555. if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
  556. IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
  557. le32_to_cpu(ucode->magic));
  558. return -EINVAL;
  559. }
  560. drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
  561. memcpy(drv->fw.human_readable, ucode->human_readable,
  562. sizeof(drv->fw.human_readable));
  563. build = le32_to_cpu(ucode->build);
  564. if (build)
  565. sprintf(buildstr, " build %u", build);
  566. else
  567. buildstr[0] = '\0';
  568. snprintf(drv->fw.fw_version,
  569. sizeof(drv->fw.fw_version),
  570. "%u.%u.%u.%u%s %s",
  571. IWL_UCODE_MAJOR(drv->fw.ucode_ver),
  572. IWL_UCODE_MINOR(drv->fw.ucode_ver),
  573. IWL_UCODE_API(drv->fw.ucode_ver),
  574. IWL_UCODE_SERIAL(drv->fw.ucode_ver),
  575. buildstr, iwl_reduced_fw_name(drv));
  576. data = ucode->data;
  577. len -= sizeof(*ucode);
  578. while (len >= sizeof(*tlv)) {
  579. len -= sizeof(*tlv);
  580. tlv = (const void *)data;
  581. tlv_len = le32_to_cpu(tlv->length);
  582. tlv_type = le32_to_cpu(tlv->type);
  583. tlv_data = tlv->data;
  584. if (len < tlv_len) {
  585. IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
  586. len, tlv_len);
  587. return -EINVAL;
  588. }
  589. len -= ALIGN(tlv_len, 4);
  590. data += sizeof(*tlv) + ALIGN(tlv_len, 4);
  591. switch (tlv_type) {
  592. case IWL_UCODE_TLV_INST:
  593. set_sec_data(pieces, IWL_UCODE_REGULAR,
  594. IWL_UCODE_SECTION_INST, tlv_data);
  595. set_sec_size(pieces, IWL_UCODE_REGULAR,
  596. IWL_UCODE_SECTION_INST, tlv_len);
  597. set_sec_offset(pieces, IWL_UCODE_REGULAR,
  598. IWL_UCODE_SECTION_INST,
  599. IWLAGN_RTC_INST_LOWER_BOUND);
  600. break;
  601. case IWL_UCODE_TLV_DATA:
  602. set_sec_data(pieces, IWL_UCODE_REGULAR,
  603. IWL_UCODE_SECTION_DATA, tlv_data);
  604. set_sec_size(pieces, IWL_UCODE_REGULAR,
  605. IWL_UCODE_SECTION_DATA, tlv_len);
  606. set_sec_offset(pieces, IWL_UCODE_REGULAR,
  607. IWL_UCODE_SECTION_DATA,
  608. IWLAGN_RTC_DATA_LOWER_BOUND);
  609. break;
  610. case IWL_UCODE_TLV_INIT:
  611. set_sec_data(pieces, IWL_UCODE_INIT,
  612. IWL_UCODE_SECTION_INST, tlv_data);
  613. set_sec_size(pieces, IWL_UCODE_INIT,
  614. IWL_UCODE_SECTION_INST, tlv_len);
  615. set_sec_offset(pieces, IWL_UCODE_INIT,
  616. IWL_UCODE_SECTION_INST,
  617. IWLAGN_RTC_INST_LOWER_BOUND);
  618. break;
  619. case IWL_UCODE_TLV_INIT_DATA:
  620. set_sec_data(pieces, IWL_UCODE_INIT,
  621. IWL_UCODE_SECTION_DATA, tlv_data);
  622. set_sec_size(pieces, IWL_UCODE_INIT,
  623. IWL_UCODE_SECTION_DATA, tlv_len);
  624. set_sec_offset(pieces, IWL_UCODE_INIT,
  625. IWL_UCODE_SECTION_DATA,
  626. IWLAGN_RTC_DATA_LOWER_BOUND);
  627. break;
  628. case IWL_UCODE_TLV_BOOT:
  629. IWL_ERR(drv, "Found unexpected BOOT ucode\n");
  630. break;
  631. case IWL_UCODE_TLV_PROBE_MAX_LEN:
  632. if (tlv_len != sizeof(u32))
  633. goto invalid_tlv_len;
  634. capa->max_probe_length =
  635. le32_to_cpup((const __le32 *)tlv_data);
  636. break;
  637. case IWL_UCODE_TLV_PAN:
  638. if (tlv_len)
  639. goto invalid_tlv_len;
  640. capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
  641. break;
  642. case IWL_UCODE_TLV_FLAGS:
  643. /* must be at least one u32 */
  644. if (tlv_len < sizeof(u32))
  645. goto invalid_tlv_len;
  646. /* and a proper number of u32s */
  647. if (tlv_len % sizeof(u32))
  648. goto invalid_tlv_len;
  649. /*
  650. * This driver only reads the first u32, as no further
  651. * flag words are currently defined. If that changes,
  652. * then either the driver will not work with the new
  653. * firmware, or it will simply not take advantage of
  654. * the new features.
  655. */
  656. capa->flags = le32_to_cpup((const __le32 *)tlv_data);
  657. break;
  658. case IWL_UCODE_TLV_API_CHANGES_SET:
  659. if (tlv_len != sizeof(struct iwl_ucode_api))
  660. goto invalid_tlv_len;
  661. iwl_set_ucode_api_flags(drv, tlv_data, capa);
  662. break;
  663. case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
  664. if (tlv_len != sizeof(struct iwl_ucode_capa))
  665. goto invalid_tlv_len;
  666. iwl_set_ucode_capabilities(drv, tlv_data, capa);
  667. break;
  668. case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
  669. if (tlv_len != sizeof(u32))
  670. goto invalid_tlv_len;
  671. pieces->init_evtlog_ptr =
  672. le32_to_cpup((const __le32 *)tlv_data);
  673. break;
  674. case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
  675. if (tlv_len != sizeof(u32))
  676. goto invalid_tlv_len;
  677. pieces->init_evtlog_size =
  678. le32_to_cpup((const __le32 *)tlv_data);
  679. break;
  680. case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
  681. if (tlv_len != sizeof(u32))
  682. goto invalid_tlv_len;
  683. pieces->init_errlog_ptr =
  684. le32_to_cpup((const __le32 *)tlv_data);
  685. break;
  686. case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
  687. if (tlv_len != sizeof(u32))
  688. goto invalid_tlv_len;
  689. pieces->inst_evtlog_ptr =
  690. le32_to_cpup((const __le32 *)tlv_data);
  691. break;
  692. case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
  693. if (tlv_len != sizeof(u32))
  694. goto invalid_tlv_len;
  695. pieces->inst_evtlog_size =
  696. le32_to_cpup((const __le32 *)tlv_data);
  697. break;
  698. case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
  699. if (tlv_len != sizeof(u32))
  700. goto invalid_tlv_len;
  701. pieces->inst_errlog_ptr =
  702. le32_to_cpup((const __le32 *)tlv_data);
  703. break;
  704. case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
  705. if (tlv_len)
  706. goto invalid_tlv_len;
  707. drv->fw.enhance_sensitivity_table = true;
  708. break;
  709. case IWL_UCODE_TLV_WOWLAN_INST:
  710. set_sec_data(pieces, IWL_UCODE_WOWLAN,
  711. IWL_UCODE_SECTION_INST, tlv_data);
  712. set_sec_size(pieces, IWL_UCODE_WOWLAN,
  713. IWL_UCODE_SECTION_INST, tlv_len);
  714. set_sec_offset(pieces, IWL_UCODE_WOWLAN,
  715. IWL_UCODE_SECTION_INST,
  716. IWLAGN_RTC_INST_LOWER_BOUND);
  717. break;
  718. case IWL_UCODE_TLV_WOWLAN_DATA:
  719. set_sec_data(pieces, IWL_UCODE_WOWLAN,
  720. IWL_UCODE_SECTION_DATA, tlv_data);
  721. set_sec_size(pieces, IWL_UCODE_WOWLAN,
  722. IWL_UCODE_SECTION_DATA, tlv_len);
  723. set_sec_offset(pieces, IWL_UCODE_WOWLAN,
  724. IWL_UCODE_SECTION_DATA,
  725. IWLAGN_RTC_DATA_LOWER_BOUND);
  726. break;
  727. case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
  728. if (tlv_len != sizeof(u32))
  729. goto invalid_tlv_len;
  730. capa->standard_phy_calibration_size =
  731. le32_to_cpup((const __le32 *)tlv_data);
  732. break;
  733. case IWL_UCODE_TLV_SEC_RT:
  734. iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
  735. tlv_len);
  736. drv->fw.type = IWL_FW_MVM;
  737. break;
  738. case IWL_UCODE_TLV_SEC_INIT:
  739. iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
  740. tlv_len);
  741. drv->fw.type = IWL_FW_MVM;
  742. break;
  743. case IWL_UCODE_TLV_SEC_WOWLAN:
  744. iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
  745. tlv_len);
  746. drv->fw.type = IWL_FW_MVM;
  747. break;
  748. case IWL_UCODE_TLV_DEF_CALIB:
  749. if (tlv_len != sizeof(struct iwl_tlv_calib_data))
  750. goto invalid_tlv_len;
  751. if (iwl_set_default_calib(drv, tlv_data))
  752. goto tlv_error;
  753. break;
  754. case IWL_UCODE_TLV_PHY_SKU:
  755. if (tlv_len != sizeof(u32))
  756. goto invalid_tlv_len;
  757. drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data);
  758. drv->fw.valid_tx_ant = (drv->fw.phy_config &
  759. FW_PHY_CFG_TX_CHAIN) >>
  760. FW_PHY_CFG_TX_CHAIN_POS;
  761. drv->fw.valid_rx_ant = (drv->fw.phy_config &
  762. FW_PHY_CFG_RX_CHAIN) >>
  763. FW_PHY_CFG_RX_CHAIN_POS;
  764. break;
  765. case IWL_UCODE_TLV_SECURE_SEC_RT:
  766. iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
  767. tlv_len);
  768. drv->fw.type = IWL_FW_MVM;
  769. break;
  770. case IWL_UCODE_TLV_SECURE_SEC_INIT:
  771. iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
  772. tlv_len);
  773. drv->fw.type = IWL_FW_MVM;
  774. break;
  775. case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
  776. iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
  777. tlv_len);
  778. drv->fw.type = IWL_FW_MVM;
  779. break;
  780. case IWL_UCODE_TLV_NUM_OF_CPU:
  781. if (tlv_len != sizeof(u32))
  782. goto invalid_tlv_len;
  783. num_of_cpus =
  784. le32_to_cpup((const __le32 *)tlv_data);
  785. if (num_of_cpus == 2) {
  786. drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
  787. true;
  788. drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
  789. true;
  790. drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
  791. true;
  792. } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
  793. IWL_ERR(drv, "Driver supports up to 2 CPUs\n");
  794. return -EINVAL;
  795. }
  796. break;
  797. case IWL_UCODE_TLV_N_SCAN_CHANNELS:
  798. if (tlv_len != sizeof(u32))
  799. goto invalid_tlv_len;
  800. capa->n_scan_channels =
  801. le32_to_cpup((const __le32 *)tlv_data);
  802. break;
  803. case IWL_UCODE_TLV_FW_VERSION: {
  804. const __le32 *ptr = (const void *)tlv_data;
  805. u32 major, minor;
  806. u8 local_comp;
  807. if (tlv_len != sizeof(u32) * 3)
  808. goto invalid_tlv_len;
  809. major = le32_to_cpup(ptr++);
  810. minor = le32_to_cpup(ptr++);
  811. local_comp = le32_to_cpup(ptr);
  812. if (major >= 35)
  813. snprintf(drv->fw.fw_version,
  814. sizeof(drv->fw.fw_version),
  815. "%u.%08x.%u %s", major, minor,
  816. local_comp, iwl_reduced_fw_name(drv));
  817. else
  818. snprintf(drv->fw.fw_version,
  819. sizeof(drv->fw.fw_version),
  820. "%u.%u.%u %s", major, minor,
  821. local_comp, iwl_reduced_fw_name(drv));
  822. break;
  823. }
  824. case IWL_UCODE_TLV_FW_DBG_DEST: {
  825. const struct iwl_fw_dbg_dest_tlv *dest = NULL;
  826. const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
  827. u8 mon_mode;
  828. pieces->dbg_dest_ver = (const u8 *)tlv_data;
  829. if (*pieces->dbg_dest_ver == 1) {
  830. dest = (const void *)tlv_data;
  831. } else if (*pieces->dbg_dest_ver == 0) {
  832. dest_v1 = (const void *)tlv_data;
  833. } else {
  834. IWL_ERR(drv,
  835. "The version is %d, and it is invalid\n",
  836. *pieces->dbg_dest_ver);
  837. break;
  838. }
  839. if (pieces->dbg_dest_tlv_init) {
  840. IWL_ERR(drv,
  841. "dbg destination ignored, already exists\n");
  842. break;
  843. }
  844. pieces->dbg_dest_tlv_init = true;
  845. if (dest_v1) {
  846. pieces->dbg_dest_tlv_v1 = dest_v1;
  847. mon_mode = dest_v1->monitor_mode;
  848. } else {
  849. pieces->dbg_dest_tlv = dest;
  850. mon_mode = dest->monitor_mode;
  851. }
  852. IWL_INFO(drv, "Found debug destination: %s\n",
  853. get_fw_dbg_mode_string(mon_mode));
  854. drv->fw.dbg.n_dest_reg = (dest_v1) ?
  855. tlv_len -
  856. offsetof(struct iwl_fw_dbg_dest_tlv_v1,
  857. reg_ops) :
  858. tlv_len -
  859. offsetof(struct iwl_fw_dbg_dest_tlv,
  860. reg_ops);
  861. drv->fw.dbg.n_dest_reg /=
  862. sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
  863. break;
  864. }
  865. case IWL_UCODE_TLV_FW_DBG_CONF: {
  866. const struct iwl_fw_dbg_conf_tlv *conf =
  867. (const void *)tlv_data;
  868. if (!pieces->dbg_dest_tlv_init) {
  869. IWL_ERR(drv,
  870. "Ignore dbg config %d - no destination configured\n",
  871. conf->id);
  872. break;
  873. }
  874. if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
  875. IWL_ERR(drv,
  876. "Skip unknown configuration: %d\n",
  877. conf->id);
  878. break;
  879. }
  880. if (pieces->dbg_conf_tlv[conf->id]) {
  881. IWL_ERR(drv,
  882. "Ignore duplicate dbg config %d\n",
  883. conf->id);
  884. break;
  885. }
  886. if (conf->usniffer)
  887. usniffer_req = true;
  888. IWL_INFO(drv, "Found debug configuration: %d\n",
  889. conf->id);
  890. pieces->dbg_conf_tlv[conf->id] = conf;
  891. pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
  892. break;
  893. }
  894. case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
  895. const struct iwl_fw_dbg_trigger_tlv *trigger =
  896. (const void *)tlv_data;
  897. u32 trigger_id = le32_to_cpu(trigger->id);
  898. if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
  899. IWL_ERR(drv,
  900. "Skip unknown trigger: %u\n",
  901. trigger->id);
  902. break;
  903. }
  904. if (pieces->dbg_trigger_tlv[trigger_id]) {
  905. IWL_ERR(drv,
  906. "Ignore duplicate dbg trigger %u\n",
  907. trigger->id);
  908. break;
  909. }
  910. IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
  911. pieces->dbg_trigger_tlv[trigger_id] = trigger;
  912. pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
  913. break;
  914. }
  915. case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
  916. if (tlv_len != sizeof(u32)) {
  917. IWL_ERR(drv,
  918. "dbg lst mask size incorrect, skip\n");
  919. break;
  920. }
  921. drv->fw.dbg.dump_mask =
  922. le32_to_cpup((const __le32 *)tlv_data);
  923. break;
  924. }
  925. case IWL_UCODE_TLV_SEC_RT_USNIFFER:
  926. *usniffer_images = true;
  927. iwl_store_ucode_sec(pieces, tlv_data,
  928. IWL_UCODE_REGULAR_USNIFFER,
  929. tlv_len);
  930. break;
  931. case IWL_UCODE_TLV_PAGING:
  932. if (tlv_len != sizeof(u32))
  933. goto invalid_tlv_len;
  934. paging_mem_size = le32_to_cpup((const __le32 *)tlv_data);
  935. IWL_DEBUG_FW(drv,
  936. "Paging: paging enabled (size = %u bytes)\n",
  937. paging_mem_size);
  938. if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
  939. IWL_ERR(drv,
  940. "Paging: driver supports up to %lu bytes for paging image\n",
  941. MAX_PAGING_IMAGE_SIZE);
  942. return -EINVAL;
  943. }
  944. if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
  945. IWL_ERR(drv,
  946. "Paging: image isn't multiple %lu\n",
  947. FW_PAGING_SIZE);
  948. return -EINVAL;
  949. }
  950. drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
  951. paging_mem_size;
  952. usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
  953. drv->fw.img[usniffer_img].paging_mem_size =
  954. paging_mem_size;
  955. break;
  956. case IWL_UCODE_TLV_FW_GSCAN_CAPA:
  957. /* ignored */
  958. break;
  959. case IWL_UCODE_TLV_FW_MEM_SEG: {
  960. const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
  961. (const void *)tlv_data;
  962. size_t size;
  963. struct iwl_fw_dbg_mem_seg_tlv *n;
  964. if (tlv_len != (sizeof(*dbg_mem)))
  965. goto invalid_tlv_len;
  966. IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
  967. dbg_mem->data_type);
  968. size = sizeof(*pieces->dbg_mem_tlv) *
  969. (pieces->n_mem_tlv + 1);
  970. n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
  971. if (!n)
  972. return -ENOMEM;
  973. pieces->dbg_mem_tlv = n;
  974. pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
  975. pieces->n_mem_tlv++;
  976. break;
  977. }
  978. case IWL_UCODE_TLV_IML: {
  979. drv->fw.iml_len = tlv_len;
  980. drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL);
  981. if (!drv->fw.iml)
  982. return -ENOMEM;
  983. break;
  984. }
  985. case IWL_UCODE_TLV_FW_RECOVERY_INFO: {
  986. const struct {
  987. __le32 buf_addr;
  988. __le32 buf_size;
  989. } *recov_info = (const void *)tlv_data;
  990. if (tlv_len != sizeof(*recov_info))
  991. goto invalid_tlv_len;
  992. capa->error_log_addr =
  993. le32_to_cpu(recov_info->buf_addr);
  994. capa->error_log_size =
  995. le32_to_cpu(recov_info->buf_size);
  996. }
  997. break;
  998. case IWL_UCODE_TLV_FW_FSEQ_VERSION: {
  999. const struct {
  1000. u8 version[32];
  1001. u8 sha1[20];
  1002. } *fseq_ver = (const void *)tlv_data;
  1003. if (tlv_len != sizeof(*fseq_ver))
  1004. goto invalid_tlv_len;
  1005. IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
  1006. fseq_ver->version);
  1007. }
  1008. break;
  1009. case IWL_UCODE_TLV_FW_NUM_STATIONS:
  1010. if (tlv_len != sizeof(u32))
  1011. goto invalid_tlv_len;
  1012. if (le32_to_cpup((const __le32 *)tlv_data) >
  1013. IWL_MVM_STATION_COUNT_MAX) {
  1014. IWL_ERR(drv,
  1015. "%d is an invalid number of station\n",
  1016. le32_to_cpup((const __le32 *)tlv_data));
  1017. goto tlv_error;
  1018. }
  1019. capa->num_stations =
  1020. le32_to_cpup((const __le32 *)tlv_data);
  1021. break;
  1022. case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
  1023. const struct iwl_umac_debug_addrs *dbg_ptrs =
  1024. (const void *)tlv_data;
  1025. if (tlv_len != sizeof(*dbg_ptrs))
  1026. goto invalid_tlv_len;
  1027. if (drv->trans->trans_cfg->device_family <
  1028. IWL_DEVICE_FAMILY_22000)
  1029. break;
  1030. drv->trans->dbg.umac_error_event_table =
  1031. le32_to_cpu(dbg_ptrs->error_info_addr) &
  1032. ~FW_ADDR_CACHE_CONTROL;
  1033. drv->trans->dbg.error_event_table_tlv_status |=
  1034. IWL_ERROR_EVENT_TABLE_UMAC;
  1035. break;
  1036. }
  1037. case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: {
  1038. const struct iwl_lmac_debug_addrs *dbg_ptrs =
  1039. (const void *)tlv_data;
  1040. if (tlv_len != sizeof(*dbg_ptrs))
  1041. goto invalid_tlv_len;
  1042. if (drv->trans->trans_cfg->device_family <
  1043. IWL_DEVICE_FAMILY_22000)
  1044. break;
  1045. drv->trans->dbg.lmac_error_event_table[0] =
  1046. le32_to_cpu(dbg_ptrs->error_event_table_ptr) &
  1047. ~FW_ADDR_CACHE_CONTROL;
  1048. drv->trans->dbg.error_event_table_tlv_status |=
  1049. IWL_ERROR_EVENT_TABLE_LMAC1;
  1050. break;
  1051. }
  1052. case IWL_UCODE_TLV_TYPE_REGIONS:
  1053. iwl_parse_dbg_tlv_assert_tables(drv, tlv);
  1054. fallthrough;
  1055. case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
  1056. case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
  1057. case IWL_UCODE_TLV_TYPE_HCMD:
  1058. case IWL_UCODE_TLV_TYPE_TRIGGERS:
  1059. case IWL_UCODE_TLV_TYPE_CONF_SET:
  1060. if (iwlwifi_mod_params.enable_ini)
  1061. iwl_dbg_tlv_alloc(drv->trans, tlv, false);
  1062. break;
  1063. case IWL_UCODE_TLV_CMD_VERSIONS:
  1064. if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
  1065. IWL_ERR(drv,
  1066. "Invalid length for command versions: %u\n",
  1067. tlv_len);
  1068. tlv_len /= sizeof(struct iwl_fw_cmd_version);
  1069. tlv_len *= sizeof(struct iwl_fw_cmd_version);
  1070. }
  1071. if (WARN_ON(capa->cmd_versions))
  1072. return -EINVAL;
  1073. capa->cmd_versions = kmemdup(tlv_data, tlv_len,
  1074. GFP_KERNEL);
  1075. if (!capa->cmd_versions)
  1076. return -ENOMEM;
  1077. capa->n_cmd_versions =
  1078. tlv_len / sizeof(struct iwl_fw_cmd_version);
  1079. break;
  1080. case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION:
  1081. if (drv->fw.phy_integration_ver) {
  1082. IWL_ERR(drv,
  1083. "phy integration str ignored, already exists\n");
  1084. break;
  1085. }
  1086. drv->fw.phy_integration_ver =
  1087. kmemdup(tlv_data, tlv_len, GFP_KERNEL);
  1088. if (!drv->fw.phy_integration_ver)
  1089. return -ENOMEM;
  1090. drv->fw.phy_integration_ver_len = tlv_len;
  1091. break;
  1092. case IWL_UCODE_TLV_SEC_TABLE_ADDR:
  1093. case IWL_UCODE_TLV_D3_KEK_KCK_ADDR:
  1094. iwl_drv_set_dump_exclude(drv, tlv_type,
  1095. tlv_data, tlv_len);
  1096. break;
  1097. default:
  1098. IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
  1099. break;
  1100. }
  1101. }
  1102. if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
  1103. usniffer_req && !*usniffer_images) {
  1104. IWL_ERR(drv,
  1105. "user selected to work with usniffer but usniffer image isn't available in ucode package\n");
  1106. return -EINVAL;
  1107. }
  1108. if (len) {
  1109. IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
  1110. iwl_print_hex_dump(drv, IWL_DL_FW, data, len);
  1111. return -EINVAL;
  1112. }
  1113. return 0;
  1114. invalid_tlv_len:
  1115. IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
  1116. tlv_error:
  1117. iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);
  1118. return -EINVAL;
  1119. }
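/*
 * iwl_alloc_ucode - turn the parsed fw_sec entries (which still point
 * into the firmware blob owned by the firmware loader) into fw_desc
 * entries with their own vmalloc'ed copies, so that release_firmware()
 * can be called once parsing is complete.
 */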
  1120. static int iwl_alloc_ucode(struct iwl_drv *drv,
  1121. struct iwl_firmware_pieces *pieces,
  1122. enum iwl_ucode_type type)
  1123. {
  1124. int i;
  1125. struct fw_desc *sec;
  1126. sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
  1127. if (!sec)
  1128. return -ENOMEM;
  1129. drv->fw.img[type].sec = sec;
  1130. drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
  1131. for (i = 0; i < pieces->img[type].sec_counter; i++)
  1132. if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
  1133. return -ENOMEM;
  1134. return 0;
  1135. }
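/*
 * validate_sec_sizes - only called for DVM firmware (see
 * iwl_req_fw_callback()), where sections are loaded to fixed SRAM
 * regions: reject images whose runtime or init inst/data sections
 * exceed cfg->max_inst_size / cfg->max_data_size.
 */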
  1136. static int validate_sec_sizes(struct iwl_drv *drv,
  1137. struct iwl_firmware_pieces *pieces,
  1138. const struct iwl_cfg *cfg)
  1139. {
  1140. IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n",
  1141. get_sec_size(pieces, IWL_UCODE_REGULAR,
  1142. IWL_UCODE_SECTION_INST));
  1143. IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n",
  1144. get_sec_size(pieces, IWL_UCODE_REGULAR,
  1145. IWL_UCODE_SECTION_DATA));
  1146. IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n",
  1147. get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
  1148. IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n",
  1149. get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
  1150. /* Verify that uCode images will fit in card's SRAM. */
  1151. if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
  1152. cfg->max_inst_size) {
  1153. IWL_ERR(drv, "uCode instr len %zd too large to fit in\n",
  1154. get_sec_size(pieces, IWL_UCODE_REGULAR,
  1155. IWL_UCODE_SECTION_INST));
  1156. return -1;
  1157. }
  1158. if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
  1159. cfg->max_data_size) {
  1160. IWL_ERR(drv, "uCode data len %zd too large to fit in\n",
  1161. get_sec_size(pieces, IWL_UCODE_REGULAR,
  1162. IWL_UCODE_SECTION_DATA));
  1163. return -1;
  1164. }
  1165. if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
  1166. cfg->max_inst_size) {
  1167. IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n",
  1168. get_sec_size(pieces, IWL_UCODE_INIT,
  1169. IWL_UCODE_SECTION_INST));
  1170. return -1;
  1171. }
  1172. if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
  1173. cfg->max_data_size) {
  1174. IWL_ERR(drv, "uCode init data len %zd too large to fit in\n",
  1175. get_sec_size(pieces, IWL_UCODE_INIT,
  1176. IWL_UCODE_SECTION_DATA));
  1177. return -1;
  1178. }
  1179. return 0;
  1180. }
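/*
 * _iwl_op_mode_start - create the op_mode debugfs directory and call
 * ops->start(); if that fails, tear the directory down and try again,
 * up to IWL_MAX_INIT_RETRY extra attempts when firmware restarts are
 * enabled (max_retry is 0 when iwlwifi_mod_params.fw_restart is off,
 * so only a single attempt is made).
 */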
  1181. static struct iwl_op_mode *
  1182. _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
  1183. {
  1184. const struct iwl_op_mode_ops *ops = op->ops;
  1185. struct dentry *dbgfs_dir = NULL;
  1186. struct iwl_op_mode *op_mode = NULL;
  1187. int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
  1188. for (retry = 0; retry <= max_retry; retry++) {
  1189. #ifdef CONFIG_IWLWIFI_DEBUGFS
  1190. drv->dbgfs_op_mode = debugfs_create_dir(op->name,
  1191. drv->dbgfs_drv);
  1192. dbgfs_dir = drv->dbgfs_op_mode;
  1193. #endif
  1194. op_mode = ops->start(drv->trans, drv->trans->cfg,
  1195. &drv->fw, dbgfs_dir);
  1196. if (op_mode)
  1197. return op_mode;
  1198. IWL_ERR(drv, "retry init count %d\n", retry);
  1199. #ifdef CONFIG_IWLWIFI_DEBUGFS
  1200. debugfs_remove_recursive(drv->dbgfs_op_mode);
  1201. drv->dbgfs_op_mode = NULL;
  1202. #endif
  1203. }
  1204. return NULL;
  1205. }
  1206. static void _iwl_op_mode_stop(struct iwl_drv *drv)
  1207. {
  1208. /* op_mode can be NULL if its start failed */
  1209. if (drv->op_mode) {
  1210. iwl_op_mode_stop(drv->op_mode);
  1211. drv->op_mode = NULL;
  1212. #ifdef CONFIG_IWLWIFI_DEBUGFS
  1213. debugfs_remove_recursive(drv->dbgfs_op_mode);
  1214. drv->dbgfs_op_mode = NULL;
  1215. #endif
  1216. }
  1217. }
  1218. /*
  1219. * iwl_req_fw_callback - callback when firmware was loaded
  1220. *
  1221. * If loaded successfully, copies the firmware into buffers
  1222. * for the card to fetch (via DMA).
  1223. */
  1224. static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
  1225. {
  1226. struct iwl_drv *drv = context;
  1227. struct iwl_fw *fw = &drv->fw;
  1228. const struct iwl_ucode_header *ucode;
  1229. struct iwlwifi_opmode_table *op;
  1230. int err;
  1231. struct iwl_firmware_pieces *pieces;
  1232. const unsigned int api_max = drv->trans->cfg->ucode_api_max;
  1233. const unsigned int api_min = drv->trans->cfg->ucode_api_min;
  1234. size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
  1235. u32 api_ver;
  1236. int i;
  1237. bool load_module = false;
  1238. bool usniffer_images = false;
  1239. bool failure = true;
  1240. fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
  1241. fw->ucode_capa.standard_phy_calibration_size =
  1242. IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
  1243. fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
  1244. fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX;
  1245. /* dump all fw memory areas by default */
  1246. fw->dbg.dump_mask = 0xffffffff;
  1247. pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
  1248. if (!pieces)
  1249. goto out_free_fw;
  1250. if (!ucode_raw)
  1251. goto try_again;
  1252. IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
  1253. drv->firmware_name, ucode_raw->size);
  1254. /* Make sure that we got at least the API version number */
  1255. if (ucode_raw->size < 4) {
  1256. IWL_ERR(drv, "File size way too small!\n");
  1257. goto try_again;
  1258. }
  1259. /* Data from ucode file: header followed by uCode images */
  1260. ucode = (const struct iwl_ucode_header *)ucode_raw->data;
  1261. if (ucode->ver)
  1262. err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
  1263. else
  1264. err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
  1265. &fw->ucode_capa, &usniffer_images);
  1266. if (err)
  1267. goto try_again;
  1268. if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
  1269. api_ver = drv->fw.ucode_ver;
  1270. else
  1271. api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
  1272. /*
  1273. * api_ver should match the api version forming part of the
  1274. * firmware filename ... but we don't check for that and only rely
  1275. * on the API version read from the firmware header from here on.
  1276. */
  1277. if (api_ver < api_min || api_ver > api_max) {
  1278. IWL_ERR(drv,
  1279. "Driver unable to support your firmware API. "
  1280. "Driver supports v%u, firmware is v%u.\n",
  1281. api_max, api_ver);
  1282. goto try_again;
  1283. }
  1284. /*
  1285. * In mvm uCode there is no difference between data and instructions
  1286. * sections.
  1287. */
  1288. if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
  1289. drv->trans->cfg))
  1290. goto try_again;
  1291. /* Allocate ucode buffers for card's bus-master loading ... */
  1292. /* Runtime instructions and 2 copies of data:
  1293. * 1) unmodified from disk
  1294. * 2) backup cache for save/restore during power-downs
  1295. */
  1296. for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
  1297. if (iwl_alloc_ucode(drv, pieces, i))
  1298. goto out_free_fw;
  1299. if (pieces->dbg_dest_tlv_init) {
  1300. size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
  1301. sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
  1302. drv->fw.dbg.n_dest_reg;
  1303. drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
  1304. if (!drv->fw.dbg.dest_tlv)
  1305. goto out_free_fw;
  1306. if (*pieces->dbg_dest_ver == 0) {
  1307. memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
  1308. dbg_dest_size);
  1309. } else {
  1310. struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
  1311. drv->fw.dbg.dest_tlv;
  1312. dest_tlv->version = pieces->dbg_dest_tlv->version;
  1313. dest_tlv->monitor_mode =
  1314. pieces->dbg_dest_tlv->monitor_mode;
  1315. dest_tlv->size_power =
  1316. pieces->dbg_dest_tlv->size_power;
  1317. dest_tlv->wrap_count =
  1318. pieces->dbg_dest_tlv->wrap_count;
  1319. dest_tlv->write_ptr_reg =
  1320. pieces->dbg_dest_tlv->write_ptr_reg;
  1321. dest_tlv->base_shift =
  1322. pieces->dbg_dest_tlv->base_shift;
  1323. memcpy(dest_tlv->reg_ops,
  1324. pieces->dbg_dest_tlv->reg_ops,
  1325. sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
  1326. drv->fw.dbg.n_dest_reg);
  1327. /* In version 1 of the destination TLV, which is
  1328. * relevant for the internal buffer exclusively,
  1329. * the base address is given together with the length
  1330. * of the buffer, and the size shift is given instead
  1331. * of the end shift. We store these values in base_reg
  1332. * and end_shift, and when dumping the data we'll
  1333. * manipulate them to extract both the length and the
  1334. * base address. */
  1335. dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
  1336. dest_tlv->end_shift =
  1337. pieces->dbg_dest_tlv->size_shift;
  1338. }
  1339. }
	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
		if (pieces->dbg_conf_tlv[i]) {
			drv->fw.dbg.conf_tlv[i] =
				kmemdup(pieces->dbg_conf_tlv[i],
					pieces->dbg_conf_tlv_len[i],
					GFP_KERNEL);
			if (!drv->fw.dbg.conf_tlv[i])
				goto out_free_fw;
		}
	}

	memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));

	trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
		sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
	trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
	trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
		sizeof(struct iwl_fw_dbg_trigger_cmd);
	trigger_tlv_sz[FW_DBG_TRIGGER_MLME] =
		sizeof(struct iwl_fw_dbg_trigger_mlme);
	trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
		sizeof(struct iwl_fw_dbg_trigger_stats);
	trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
		sizeof(struct iwl_fw_dbg_trigger_low_rssi);
	trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
		sizeof(struct iwl_fw_dbg_trigger_txq_timer);
	trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
		sizeof(struct iwl_fw_dbg_trigger_time_event);
	trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
		sizeof(struct iwl_fw_dbg_trigger_ba);
	trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
		sizeof(struct iwl_fw_dbg_trigger_tdls);

	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
		if (pieces->dbg_trigger_tlv[i]) {
			/*
			 * If the trigger isn't long enough, WARN and exit.
			 * Someone is trying to debug something and won't be
			 * able to catch the bug they are chasing; we'd better
			 * be noisy so they know what's going on.
			 */
			if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
				    (trigger_tlv_sz[i] +
				     sizeof(struct iwl_fw_dbg_trigger_tlv))))
				goto out_free_fw;
			drv->fw.dbg.trigger_tlv_len[i] =
				pieces->dbg_trigger_tlv_len[i];
			drv->fw.dbg.trigger_tlv[i] =
				kmemdup(pieces->dbg_trigger_tlv[i],
					drv->fw.dbg.trigger_tlv_len[i],
					GFP_KERNEL);
			if (!drv->fw.dbg.trigger_tlv[i])
				goto out_free_fw;
		}
	}
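	/*
	 * Worked example for the length check above: a trigger TLV must carry
	 * at least the common struct iwl_fw_dbg_trigger_tlv header plus the
	 * per-trigger payload recorded in trigger_tlv_sz[].  For instance, a
	 * FW_DBG_TRIGGER_MISSED_BEACONS TLV shorter than
	 * sizeof(struct iwl_fw_dbg_trigger_tlv) +
	 * sizeof(struct iwl_fw_dbg_trigger_missed_bcon) trips the WARN_ON()
	 * and aborts the load.
	 */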
	/* Now that we can no longer fail, copy information */

	drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
	pieces->dbg_mem_tlv = NULL;
	drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;

	/*
	 * The (size - 16) / 12 formula is based on the information recorded
	 * for each event, which is of mode 1 (including timestamp) for all
	 * new microcodes that include this information.
	 */
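	/*
	 * Worked example (the exact log layout is an assumption here): with a
	 * 16-byte header and 12 bytes per mode-1 entry (timestamp, event ID,
	 * data), a 4096-byte event log area yields (4096 - 16) / 12 = 340
	 * entries.
	 */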
	fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
	if (pieces->init_evtlog_size)
		fw->init_evtlog_size = (pieces->init_evtlog_size - 16) / 12;
	else
		fw->init_evtlog_size =
			drv->trans->trans_cfg->base_params->max_event_log_size;
	fw->init_errlog_ptr = pieces->init_errlog_ptr;
	fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
	if (pieces->inst_evtlog_size)
		fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16) / 12;
	else
		fw->inst_evtlog_size =
			drv->trans->trans_cfg->base_params->max_event_log_size;
	fw->inst_errlog_ptr = pieces->inst_errlog_ptr;

	/*
	 * Figure out the offset of the chain noise reset and gain commands
	 * based on the size of the standard phy calibration commands table.
	 */
	if (fw->ucode_capa.standard_phy_calibration_size >
	    IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
		fw->ucode_capa.standard_phy_calibration_size =
			IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
	/* We have our copies now, allow the OS to release its copies */
	release_firmware(ucode_raw);

	iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);

	mutex_lock(&iwlwifi_opmode_table_mtx);
	switch (fw->type) {
	case IWL_FW_DVM:
		op = &iwlwifi_opmode_table[DVM_OP_MODE];
		break;
	default:
		WARN(1, "Invalid fw type %d\n", fw->type);
		fallthrough;
	case IWL_FW_MVM:
		op = &iwlwifi_opmode_table[MVM_OP_MODE];
		break;
	}

	IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
		 drv->fw.fw_version, op->name);

	/* add this device to the list of devices using this op_mode */
	list_add_tail(&drv->list, &op->drv);

	if (op->ops) {
		drv->op_mode = _iwl_op_mode_start(drv, op);

		if (!drv->op_mode) {
			mutex_unlock(&iwlwifi_opmode_table_mtx);
			goto out_unbind;
		}
	} else {
		load_module = true;
	}
	mutex_unlock(&iwlwifi_opmode_table_mtx);

	/*
	 * Complete the firmware request last so that
	 * a driver unbind (stop) doesn't run while we
	 * are doing the start() above.
	 */
	complete(&drv->request_firmware_complete);

	/*
	 * Load the module last so we don't block anything
	 * else from proceeding if the module fails to load
	 * or hangs loading.
	 */
	if (load_module)
		request_module("%s", op->name);
	failure = false;
	goto free;

try_again:
	/* try next, if any */
	release_firmware(ucode_raw);
	if (iwl_request_firmware(drv, false))
		goto out_unbind;
	goto free;

out_free_fw:
	release_firmware(ucode_raw);

out_unbind:
	complete(&drv->request_firmware_complete);
	device_release_driver(drv->trans->dev);
	/* drv has just been freed by the release */
	failure = false;
free:
	if (failure)
		iwl_dealloc_ucode(drv);

	if (pieces) {
		for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
			kfree(pieces->img[i].sec);
		kfree(pieces->dbg_mem_tlv);
		kfree(pieces);
	}
}
struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
{
	struct iwl_drv *drv;
	int ret;

	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
	if (!drv) {
		ret = -ENOMEM;
		goto err;
	}

	drv->trans = trans;
	drv->dev = trans->dev;

	init_completion(&drv->request_firmware_complete);
	INIT_LIST_HEAD(&drv->list);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* Create the device debugfs entries. */
	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
					    iwl_dbgfs_root);

	/* Create transport layer debugfs dir */
	drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
#endif

	drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
	if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
		/* We have a non-default value in the module parameter,
		 * take its value
		 */
		drv->trans->dbg.domains_bitmap &= 0xffff;
		if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
			if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
				IWL_ERR(trans,
					"invalid enable_ini module parameter value: max = %d, using 0 instead\n",
					ENABLE_INI);
				iwlwifi_mod_params.enable_ini = 0;
			}
			drv->trans->dbg.domains_bitmap =
				BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
		}
	}
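	/*
	 * Illustrative example of the logic above: loading with enable_ini=1
	 * selects only the preset-1 debug domain, i.e. domains_bitmap becomes
	 * BIT(IWL_FW_DBG_DOMAIN_POS + 1), while enable_ini=0 (disable) keeps
	 * the masked default bitmap.
	 */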
	ret = iwl_request_firmware(drv, true);
	if (ret) {
		IWL_ERR(trans, "Couldn't request the fw\n");
		goto err_fw;
	}

	return drv;

err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
	debugfs_remove_recursive(drv->dbgfs_drv);
	iwl_dbg_tlv_free(drv->trans);
#endif
	kfree(drv);
err:
	return ERR_PTR(ret);
}
void iwl_drv_stop(struct iwl_drv *drv)
{
	wait_for_completion(&drv->request_firmware_complete);

	_iwl_op_mode_stop(drv);

	iwl_dealloc_ucode(drv);

	mutex_lock(&iwlwifi_opmode_table_mtx);
	/*
	 * List is empty (this item wasn't added)
	 * when firmware loading failed -- in that
	 * case we can't remove it from any list.
	 */
	if (!list_empty(&drv->list))
		list_del(&drv->list);
	mutex_unlock(&iwlwifi_opmode_table_mtx);

#ifdef CONFIG_IWLWIFI_DEBUGFS
	drv->trans->ops->debugfs_cleanup(drv->trans);

	debugfs_remove_recursive(drv->dbgfs_drv);
#endif

	iwl_dbg_tlv_free(drv->trans);

	kfree(drv);
}
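/*
 * Usage sketch (illustrative only, not code from this file): a bus glue
 * layer such as the PCIe driver is expected to pair these two entry points
 * roughly as follows; the surrounding probe/remove context is an assumption.
 *
 *	drv = iwl_drv_start(trans);
 *	if (IS_ERR(drv))
 *		return PTR_ERR(drv);
 *	...
 *	iwl_drv_stop(drv);
 */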
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
	.fw_restart = true,
	.bt_coex_active = true,
	.power_level = IWL_POWER_INDEX_1,
	.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
	.enable_ini = ENABLE_INI,
	/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
{
	int i;
	struct iwl_drv *drv;
	struct iwlwifi_opmode_table *op;

	mutex_lock(&iwlwifi_opmode_table_mtx);
	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
		op = &iwlwifi_opmode_table[i];
		if (strcmp(op->name, name))
			continue;
		op->ops = ops;
		/* TODO: need to handle exceptional case */
		list_for_each_entry(drv, &op->drv, list)
			drv->op_mode = _iwl_op_mode_start(drv, op);
		mutex_unlock(&iwlwifi_opmode_table_mtx);
		return 0;
	}
	mutex_unlock(&iwlwifi_opmode_table_mtx);
	return -EIO;
}
IWL_EXPORT_SYMBOL(iwl_opmode_register);
void iwl_opmode_deregister(const char *name)
{
	int i;
	struct iwl_drv *drv;

	mutex_lock(&iwlwifi_opmode_table_mtx);
	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
		if (strcmp(iwlwifi_opmode_table[i].name, name))
			continue;
		iwlwifi_opmode_table[i].ops = NULL;

		/* call the stop routine for all devices */
		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
			_iwl_op_mode_stop(drv);

		mutex_unlock(&iwlwifi_opmode_table_mtx);
		return;
	}
	mutex_unlock(&iwlwifi_opmode_table_mtx);
}
IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
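/*
 * Usage sketch (illustrative; identifiers other than the registered op-mode
 * name are assumptions): an op-mode module registers its ops from its module
 * init and deregisters them on exit, using the name under which it appears
 * in iwlwifi_opmode_table, e.g.:
 *
 *	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
 *	...
 *	iwl_opmode_deregister("iwlmvm");
 *
 * "iwl_mvm_ops" is only an example identifier here.
 */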
static int __init iwl_drv_init(void)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
		INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);

	pr_info(DRV_DESCRIPTION "\n");

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* Create the root of iwlwifi debugfs subsystem. */
	iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
#endif

	err = iwl_pci_register_driver();
	if (err)
		goto cleanup_debugfs;

	return 0;

cleanup_debugfs:
#ifdef CONFIG_IWLWIFI_DEBUGFS
	debugfs_remove_recursive(iwl_dbgfs_root);
#endif
	return err;
}
module_init(iwl_drv_init);

static void __exit iwl_drv_exit(void)
{
	iwl_pci_unregister_driver();

#ifdef CONFIG_IWLWIFI_DEBUGFS
	debugfs_remove_recursive(iwl_dbgfs_root);
#endif
}
module_exit(iwl_drv_exit);
#ifdef CONFIG_IWLWIFI_DEBUG
module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444);
MODULE_PARM_DESC(11n_disable,
	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8: enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
		 "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, "
		 "4K for other devices 1:4K 2:8K 3:12K (16K buffers) 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");

module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
MODULE_PARM_DESC(nvm_file, "NVM file name");

module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");

module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
MODULE_PARM_DESC(enable_ini,
		 "0:disable, 1-15:FW_DBG_PRESET values, 16:enabled without preset value defined, "
		 "Debug INI TLV FW debug infrastructure (default: 16)");
/*
 * When bt_coex_active is true, the uCode will do kill/defer every time the
 * priority line is asserted (BT is sending signals on the priority line in
 * the PCIx).  When bt_coex_active is false, the uCode will ignore the BT
 * activity and perform normal operation.
 *
 * Users might experience transmit issues on some platforms due to WiFi/BT
 * coexistence problems.  The possible behaviors are:
 *	Able to scan and find all the available APs
 *	Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
		   bool, 0444);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");

module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444);
MODULE_PARM_DESC(led_mode, "0=system default, "
		 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");

module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444);
MODULE_PARM_DESC(power_save,
		 "enable WiFi power management (default: disable)");

module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444);
MODULE_PARM_DESC(power_level,
		 "default power save level (range from 1 - 5, default: 1)");

module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");

module_param_named(remove_when_gone,
		   iwlwifi_mod_params.remove_when_gone, bool,
		   0444);
MODULE_PARM_DESC(remove_when_gone,
		 "Remove dev from PCIe bus if it is deemed inaccessible (default: false)");

module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, 0444);
MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");