iwl-dbg-tlv.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2022 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_CONF_SET,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 3,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_CONF_SET]	= {.min_ver = 1, .max_ver = 1,},
};

static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
			   struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv));
	memcpy(node->tlv.data, tlv->data, len);
	list_add_tail(&node->list, list);

	return 0;
}

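/*
 * Note on the copies above: struct iwl_dbg_tlv_node embeds a fixed-size
 * TLV header followed by a flexible data array, so a single
 * kzalloc(sizeof(*node) + len) holds both; the header and the
 * variable-length payload are then copied separately. A minimal usage
 * sketch (hypothetical caller, not from this file):
 *
 *	LIST_HEAD(my_list);
 *	ret = iwl_dbg_tlv_add(tlv, &my_list);	// copies tlv, appends node
 */
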
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
	u32 buf_location;
	u32 alloc_id;

	if (le32_to_cpu(tlv->length) != sizeof(*alloc))
		return -EINVAL;

	buf_location = le32_to_cpu(alloc->buf_location);
	alloc_id = le32_to_cpu(alloc->alloc_id);

	if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
	    buf_location >= IWL_FW_INI_LOCATION_NUM)
		goto err;

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
	    alloc->req_size == 0) {
		IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
		return -EINVAL;
	}

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
err:
	IWL_ERR(trans,
		"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
		alloc_id, buf_location);

	return -EINVAL;
}

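/*
 * Summary of the allocation-TLV constraints enforced above (derived
 * from the checks themselves): the buffer location and allocation id
 * must be valid enum values; the NPK and SRAM paths may only be paired
 * with allocation id DBGC1; and a DRAM-path allocation must request a
 * non-zero size. Anything else is rejected with -EINVAL.
 */
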
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent at the early time point since
	 * the FW is not ready yet
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u8 type = reg->type;
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/*
	 * The higher part of the ID from version 2 is debug policy.
	 * The id will be only lsb 16 bits, so mask it out.
	 */
	if (le32_to_cpu(reg->hdr.version) >= 2)
		id &= IWL_FW_INI_REGION_ID_MASK;

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	/* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
	IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
		     IWL_FW_INI_MAX_NAME, reg->name);

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
		trans->dbg.imr_data.sram_addr =
			le32_to_cpu(reg->internal_buffer.base_addr);
		trans->dbg.imr_data.sram_size =
			le32_to_cpu(reg->internal_buffer.size);
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
	struct iwl_fw_ini_trigger_tlv *dup_trig;
	u32 tp = le32_to_cpu(trig->time_point);
	u32 rf = le32_to_cpu(trig->reset_fw);
	struct iwl_ucode_tlv *dup = NULL;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	IWL_DEBUG_FW(trans,
		     "WRT: time point %u for trigger TLV with reset_fw %u\n",
		     tp, rf);
	trans->dbg.last_tp_resetfw = 0xFF;

	/* a zero occurrences count is treated as unlimited: duplicate the
	 * TLV and store -1 (i.e. ~0) so decrementing never reaches zero
	 */
	if (!le32_to_cpu(trig->occurrences)) {
		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
			      GFP_KERNEL);
		if (!dup)
			return -ENOMEM;
		dup_trig = (void *)dup->data;
		dup_trig->occurrences = cpu_to_le32(-1);
		tlv = dup;
	}

	ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	kfree(dup);

	return ret;
}

static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
	u32 tp = le32_to_cpu(conf_set->time_point);
	u32 type = le32_to_cpu(conf_set->set_type);

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_DEBUG_FW(trans,
			     "WRT: Invalid time point %u for config set TLV\n", tp);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID ||
	    type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) {
		IWL_DEBUG_FW(trans,
			     "WRT: Invalid config set type %u for config set TLV\n", type);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
}

static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      const struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD]		= iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION]	= iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER]	= iwl_dbg_tlv_alloc_trigger,
	[IWL_DBG_TLV_TYPE_CONF_SET]	= iwl_dbg_tlv_config_set,
};

void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type;
	u32 tlv_idx;
	u32 domain;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*hdr))
		return;

	type = le32_to_cpu(tlv->type);
	tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	domain = le32_to_cpu(hdr->domain);

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer_sync(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->config_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	const struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

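/*
 * Worked example of the walk above: each record is a fixed TLV header
 * followed by a payload padded to a 4-byte boundary. For a payload of
 * tlv_len = 5, ALIGN(5, 4) = 8, so the cursor advances by
 * sizeof(*tlv) + 8 and the loop continues at the next header. A
 * truncated buffer (len < tlv_len) aborts with -EINVAL before the
 * payload is touched.
 */
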
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	const char *yoyo_bin = "iwl-debug-yoyo.bin";
	int res;

	if (!iwlwifi_mod_params.enable_ini ||
	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
		return;

	res = firmware_request_nowarn(&fw, yoyo_bin, dev);
	IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);
	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
		INIT_LIST_HEAD(&tp->config_list);
	}
}

static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something. Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

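/*
 * Example of the halving sequence above: a request for 256 pages falls
 * back to 128, 64, 32, ... on each allocation failure. Since
 * DIV_ROUND_UP(1, 2) is still 1, the loop condition (pages > 1) is
 * what ends the retries; a successful allocation returns the page
 * count actually obtained, which the caller subtracts from its
 * remaining budget.
 */
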
static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

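/*
 * Worked example of the fragment math above (assuming 4 KiB pages):
 * req_size = 4 MiB gives remain_pages = 1024. With max_frags_num = 16
 * (and BUF_ALLOC_MAX_NUM_FRAGS not exceeded), frag_pages =
 * DIV_ROUND_UP(1024, 16) = 64, i.e. sixteen 256 KiB fragments. If a
 * fragment allocation fails partway through, the buffer is kept as
 * long as the total already allocated still meets min_size; otherwise
 * everything is freed and the error is propagated.
 */
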
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
			.flags = CMD_SEND_IN_RFKILL,
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

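/*
 * Batching example for the loop above: a BUFFER_ALLOCATION host command
 * carries at most BUF_ALLOC_MAX_NUM_FRAGS fragment descriptors, so e.g.
 * 5 remaining fragments with a limit of 2 per command are sent as three
 * commands (2 + 2 + 1). For DBGC1 the first fragment is skipped here
 * because it was already handed to the FW via register/context info.
 */
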
static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
				   enum iwl_fw_ini_allocation_id alloc_id,
				   struct iwl_dram_info *dram_info)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_frags;
	int j, fw_mon_idx = 0;
	struct iwl_buf_alloc_cmd *data;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH) {
		IWL_DEBUG_FW(fwrt, "DRAM_PATH is not supported alloc_id %u\n", alloc_id);
		return -1;
	}

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return -1;

	num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	data = &dram_info->dram_frags[alloc_id - 1];
	data->alloc_id = cpu_to_le32(alloc_id);
	data->num_frags = cpu_to_le32(num_frags);
	data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH);

	IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n",
		     alloc_id, num_frags);

	for (j = 0; j < num_frags; j++) {
		struct iwl_buf_alloc_frag *frag = &data->frags[j];
		struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++];

		frag->addr = cpu_to_le64(fw_mon_frag->physical);
		frag->size = cpu_to_le32(fw_mon_frag->size);
		IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n");
		IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n",
			     j, (unsigned long long)fw_mon_frag->physical,
			     fw_mon_frag->size);
	}

	return 0;
}

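/*
 * Note on the dram_frags[alloc_id - 1] indexing above: the dram_info
 * payload has no slot for IWL_FW_INI_ALLOCATION_INVALID (the lowest
 * enum value), so the first valid allocation id, DBGC1, lands in entry
 * 0 and DBGC2/DBGC3 follow. The -1 return value is only used by the
 * caller to decide whether any DRAM buffer was set; no errno is
 * propagated from here.
 */
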
static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
	int ret, i;
	bool dram_alloc = false;
	struct iwl_dram_data *frags =
		&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
	struct iwl_dram_info *dram_info;

	if (!frags || !frags->block)
		return;

	dram_info = frags->block;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
	dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);

	for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	     i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
		ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
		if (!ret)
			dram_alloc = true;
		else
			IWL_WARN(fwrt,
				 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
				 i, ret);
	}

	if (dram_alloc)
		IWL_DEBUG_FW(fwrt, "block data after %08x\n",
			     le32_to_cpu(dram_info->first_word));
	else
		memset(frags->block, 0, sizeof(*dram_info));
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
				     struct list_head *conf_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, conf_list, list) {
		struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
		u32 count, address, value;
		/* each addr_val entry is an address/value pair of 8 bytes */
		u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
		u32 type = le32_to_cpu(config_list->set_type);
		u32 offset = le32_to_cpu(config_list->addr_offset);

		switch (type) {
		case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: {
			if (!iwl_trans_grab_nic_access(fwrt->trans)) {
				IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n");
				IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n");
				continue;
			}
			IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len);
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_trans_write_prph(fwrt->trans, address + offset, value);
			}
			iwl_trans_release_nic_access(fwrt->trans);
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: {
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_trans_write_mem32(fwrt->trans, address + offset, value);
				IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n",
					     count, address, value);
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_CSR: {
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_write32(fwrt->trans, address + offset, value);
				IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n",
					     count, address, value);
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
			struct iwl_dbgc1_info dram_info = {};
			struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
			__le64 dram_base_addr;
			__le32 dram_size;
			u64 dram_addr;
			int ret;

			if (!frags)
				break;

			dram_base_addr = cpu_to_le64(frags->physical);
			dram_size = cpu_to_le32(frags->size);
			dram_addr = le64_to_cpu(dram_base_addr);

			IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
				     dram_addr, le32_to_cpu(dram_size));
			IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
				     le32_to_cpu(config_list->addr_offset));
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				dram_info.dbgc1_add_lsb =
					cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400);
				dram_info.dbgc1_add_msb =
					cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32);
				dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400);
				ret = iwl_trans_write_mem(fwrt->trans,
							  address + offset, &dram_info, 4);
				if (ret) {
					IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n");
					break;
				}
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: {
			u32 debug_token_config =
				le32_to_cpu(config_list->addr_val[0].value);

			IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n",
				     debug_token_config);
			fwrt->trans->dbg.ucode_preset = debug_token_config;
			break;
		}
		default:
			break;
		}
	}
}

static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger: no interval data given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
				   const struct iwl_ucode_tlv *old)
{
	const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
	const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
	const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

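/*
 * In set terms, the check above is a subset test: it returns true iff
 * every dword of the new trigger's data appears somewhere in the old
 * trigger's data, ignoring order and multiplicity. For example,
 * new = {2, 1} is contained in old = {1, 2, 3}, while new = {1, 4} is
 * not. The scan is O(n * m), which is fine for the small data arrays
 * these triggers carry.
 */
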
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

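/*
 * Size arithmetic for the append path above: with an existing node
 * carrying data_len bytes of data and an incoming trigger TLV of
 * length "size", appending sets offset = data_len and grows the node
 * to size + data_len, so the new dwords land right after the old ones.
 * In the override path offset stays 0 and the incoming data simply
 * overwrites the node's data in place.
 */
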
static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
		    pkt->hdr.group_id == wanted_hdr->group_id)) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;
		u32 tp = le32_to_cpu(dump_data.trig->time_point);

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
				if (ret)
					return ret;

				break;
			}
		}

		fwrt->trans->dbg.restart_required = false;
		IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n",
			       tp, le32_to_cpu(dump_data.trig->reset_fw));
		IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n",
			       fwrt->trans->dbg.restart_required,
			       fwrt->trans->dbg.last_tp_resetfw);

		if (fwrt->trans->trans_cfg->device_family ==
		    IWL_DEVICE_FAMILY_9000) {
			fwrt->trans->dbg.restart_required = true;
		} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
			   fwrt->trans->dbg.last_tp_resetfw ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
			fwrt->trans->dbg.restart_required = false;
			fwrt->trans->dbg.last_tp_resetfw = 0xFF;
			IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
			IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n");
			fwrt->trans->dbg.restart_required = true;
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
			IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n");
			fwrt->trans->dbg.restart_required = false;
			fwrt->trans->dbg.last_tp_resetfw =
				le32_to_cpu(dump_data.trig->reset_fw);
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_NOTHING) {
			IWL_DEBUG_INFO(fwrt,
				       "WRT: nothing need to be done after debug collection\n");
		} else {
			IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
				le32_to_cpu(dump_data.trig->reset_fw));
		}
	}

	return 0;
}

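/*
 * Summary of the reset_fw handling above: family 9000 always requires
 * a full restart; STOP_AND_RELOAD_FW requires a restart;
 * STOP_FW_ONLY stops the firmware without reloading it and records the
 * mode in last_tp_resetfw so that a subsequent FW_ASSERT time point
 * does not trigger a restart; NOTHING leaves the firmware running
 * after the debug data is collected.
 */
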
static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;
	u32 failed_alloc = 0;

	if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
		return;

	IWL_DEBUG_FW(fwrt,
		     "WRT: Generating active triggers list, domain 0x%x\n",
		     fwrt->trans->dbg.domains_bitmap);

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&fwrt->trans->dbg.time_point[i];

		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID) {
			failed_alloc |= BIT(i);
			continue;
		}

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
		if (ret) {
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
			failed_alloc |= BIT(i);
		}
	}

	if (!failed_alloc)
		return;

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
		struct iwl_fw_ini_region_tlv *reg;
		struct iwl_ucode_tlv **active_reg =
			&fwrt->trans->dbg.active_regions[i];
		u32 reg_type;

		if (!*active_reg) {
			fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
			continue;
		}

		reg = (void *)(*active_reg)->data;
		reg_type = reg->type;

		if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
		    !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
			continue;

		IWL_DEBUG_FW(fwrt,
			     "WRT: removing allocation id %d from region id %d\n",
			     le32_to_cpu(reg->dram_alloc_id), i);

		failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
		fwrt->trans->dbg.unsupported_region_msk |= BIT(i);

		kfree(*active_reg);
		*active_reg = NULL;
	}
}

void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			     enum iwl_fw_ini_time_point tp_id,
			     union iwl_dbg_tlv_tp_data *tp_data,
			     bool sync)
{
	struct list_head *hcmd_list, *trig_list, *conf_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
	conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_update_drams(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);