  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * AMD CPU Microcode Update Driver for Linux
  4. *
  5. * This driver allows to upgrade microcode on F10h AMD
  6. * CPUs and later.
  7. *
  8. * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
  9. * 2013-2018 Borislav Petkov <[email protected]>
  10. *
  11. * Author: Peter Oruba <[email protected]>
  12. *
  13. * Based on work by:
  14. * Tigran Aivazian <[email protected]>
  15. *
  16. * early loader:
  17. * Copyright (C) 2013 Advanced Micro Devices, Inc.
  18. *
  19. * Author: Jacob Shin <[email protected]>
  20. * Fixes: Borislav Petkov <[email protected]>
  21. */
  22. #define pr_fmt(fmt) "microcode: " fmt
  23. #include <linux/earlycpio.h>
  24. #include <linux/firmware.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/vmalloc.h>
  27. #include <linux/initrd.h>
  28. #include <linux/kernel.h>
  29. #include <linux/pci.h>
  30. #include <asm/microcode_amd.h>
  31. #include <asm/microcode.h>
  32. #include <asm/processor.h>
  33. #include <asm/setup.h>
  34. #include <asm/cpu.h>
  35. #include <asm/msr.h>
/* CPU equivalence table: maps installed CPU signatures to equivalence IDs. */
static struct equiv_cpu_table {
	unsigned int num_entries;	/* number of valid entries in @entry */
	struct equiv_cpu_entry *entry;	/* vmalloc'ed copy, see install_equiv_cpu_table() */
} equiv_table;
/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
 * microcode patch we found to match.
 */
struct cont_desc {
	struct microcode_amd *mc;	/* matching patch inside the container, if any */
	u32		     cpuid_1_eax;	/* CPUID(1).EAX signature we match against */
	u32		     psize;	/* size of the matching patch */
	u8		     *data;	/* start of the container holding @mc */
	size_t		     size;	/* size of that container */
};
/* Revision of the most recently applied patch; 0 if none was applied yet. */
static u32 ucode_new_rev;

/* One blob per node. */
static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];

/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/microcode.rst
 */
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
  61. static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
  62. {
  63. unsigned int i;
  64. if (!et || !et->num_entries)
  65. return 0;
  66. for (i = 0; i < et->num_entries; i++) {
  67. struct equiv_cpu_entry *e = &et->entry[i];
  68. if (sig == e->installed_cpu)
  69. return e->equiv_cpu;
  70. e++;
  71. }
  72. return 0;
  73. }
  74. /*
  75. * Check whether there is a valid microcode container file at the beginning
  76. * of @buf of size @buf_size. Set @early to use this function in the early path.
  77. */
  78. static bool verify_container(const u8 *buf, size_t buf_size, bool early)
  79. {
  80. u32 cont_magic;
  81. if (buf_size <= CONTAINER_HDR_SZ) {
  82. if (!early)
  83. pr_debug("Truncated microcode container header.\n");
  84. return false;
  85. }
  86. cont_magic = *(const u32 *)buf;
  87. if (cont_magic != UCODE_MAGIC) {
  88. if (!early)
  89. pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
  90. return false;
  91. }
  92. return true;
  93. }
  94. /*
  95. * Check whether there is a valid, non-truncated CPU equivalence table at the
  96. * beginning of @buf of size @buf_size. Set @early to use this function in the
  97. * early path.
  98. */
  99. static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
  100. {
  101. const u32 *hdr = (const u32 *)buf;
  102. u32 cont_type, equiv_tbl_len;
  103. if (!verify_container(buf, buf_size, early))
  104. return false;
  105. cont_type = hdr[1];
  106. if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
  107. if (!early)
  108. pr_debug("Wrong microcode container equivalence table type: %u.\n",
  109. cont_type);
  110. return false;
  111. }
  112. buf_size -= CONTAINER_HDR_SZ;
  113. equiv_tbl_len = hdr[2];
  114. if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
  115. buf_size < equiv_tbl_len) {
  116. if (!early)
  117. pr_debug("Truncated equivalence table.\n");
  118. return false;
  119. }
  120. return true;
  121. }
  122. /*
  123. * Check whether there is a valid, non-truncated microcode patch section at the
  124. * beginning of @buf of size @buf_size. Set @early to use this function in the
  125. * early path.
  126. *
  127. * On success, @sh_psize returns the patch size according to the section header,
  128. * to the caller.
  129. */
  130. static bool
  131. __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
  132. {
  133. u32 p_type, p_size;
  134. const u32 *hdr;
  135. if (buf_size < SECTION_HDR_SIZE) {
  136. if (!early)
  137. pr_debug("Truncated patch section.\n");
  138. return false;
  139. }
  140. hdr = (const u32 *)buf;
  141. p_type = hdr[0];
  142. p_size = hdr[1];
  143. if (p_type != UCODE_UCODE_TYPE) {
  144. if (!early)
  145. pr_debug("Invalid type field (0x%x) in container file section header.\n",
  146. p_type);
  147. return false;
  148. }
  149. if (p_size < sizeof(struct microcode_header_amd)) {
  150. if (!early)
  151. pr_debug("Patch of size %u too short.\n", p_size);
  152. return false;
  153. }
  154. *sh_psize = p_size;
  155. return true;
  156. }
  157. /*
  158. * Check whether the passed remaining file @buf_size is large enough to contain
  159. * a patch of the indicated @sh_psize (and also whether this size does not
  160. * exceed the per-family maximum). @sh_psize is the size read from the section
  161. * header.
  162. */
  163. static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
  164. {
  165. u32 max_size;
  166. if (family >= 0x15)
  167. return min_t(u32, sh_psize, buf_size);
  168. #define F1XH_MPB_MAX_SIZE 2048
  169. #define F14H_MPB_MAX_SIZE 1824
  170. switch (family) {
  171. case 0x10 ... 0x12:
  172. max_size = F1XH_MPB_MAX_SIZE;
  173. break;
  174. case 0x14:
  175. max_size = F14H_MPB_MAX_SIZE;
  176. break;
  177. default:
  178. WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
  179. return 0;
  180. }
  181. if (sh_psize > min_t(u32, buf_size, max_size))
  182. return 0;
  183. return sh_psize;
  184. }
/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */
static int
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
{
	struct microcode_header_amd *mc_hdr;
	unsigned int ret;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
		return -1;

	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */
	buf_size -= SECTION_HDR_SIZE;

	/*
	 * Check if the remaining buffer is big enough to contain a patch of
	 * size sh_psize, as the section claims.
	 */
	if (buf_size < sh_psize) {
		if (!early)
			pr_debug("Patch of size %u truncated.\n", sh_psize);
		return -1;
	}

	/* Enforce the per-family maximum patch size too. */
	ret = __verify_patch_size(family, sh_psize, buf_size);
	if (!ret) {
		if (!early)
			pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	/* Chipset-specific (northbridge/southbridge) patches are unsupported. */
	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		if (!early)
			pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	/* The top nibble of processor_rev_id encodes the family offset from 0xf. */
	proc_id = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}
/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 * Returns the amount of bytes consumed while scanning, or 0 when @ucode
 * itself is the matching container. @desc contains all the data we're going
 * to use in later stages of the application.
 */
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size, true))
		return 0;

	buf = ucode;

	/* The equivalence table immediately follows the container header. */
	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);

	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, desc->cpuid_1_eax);

	/* Advance past the container header and the equivalence table. */
	buf += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;

	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next
			 * container, if there's one:
			 */
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		/* Remember the (last) patch whose equiv ID matches ours. */
		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
		/* Skip patch section header too: */
		buf += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}

	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

out:
	return orig_size - size;
}
  307. /*
  308. * Scan the ucode blob for the proper container as we can have multiple
  309. * containers glued together.
  310. */
  311. static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
  312. {
  313. while (size) {
  314. size_t s = parse_container(ucode, size, desc);
  315. if (!s)
  316. return;
  317. /* catch wraparound */
  318. if (size >= s) {
  319. ucode += s;
  320. size -= s;
  321. } else {
  322. return;
  323. }
  324. }
  325. }
/*
 * Apply patch @mc by handing its payload address to the patch-loader MSR,
 * then read back the current patch level to confirm the update took effect.
 *
 * Returns 0 on success, -1 if the running revision does not match the
 * patch's revision afterwards.
 */
static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	/* 32-bit early path may run before paging: use physical addresses. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch = &amd_ucode_patch[0];
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);

	mc = desc.mc;
	if (!mc)
		return ret;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (rev > mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret = true;

		/* Stash the patch so APs (and resume) can reuse it. */
		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	return ret;
}
  383. static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
  384. {
  385. char fw_name[36] = "amd-ucode/microcode_amd.bin";
  386. struct firmware fw;
  387. if (IS_ENABLED(CONFIG_X86_32))
  388. return false;
  389. if (family >= 0x15)
  390. snprintf(fw_name, sizeof(fw_name),
  391. "amd-ucode/microcode_amd_fam%.2xh.bin", family);
  392. if (firmware_request_builtin(&fw, fw_name)) {
  393. cp->size = fw.size;
  394. cp->data = (void *)fw.data;
  395. return true;
  396. }
  397. return false;
  398. }
/*
 * Locate a microcode blob for the CPU described by @cpuid_1_eax: try the
 * builtin firmware first, then fall back to the initrd. The result (possibly
 * empty) is handed back through @ret. Also records the CPU signature in
 * ucode_cpu_info for load_microcode_amd().
 */
static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* 32-bit early path may run before paging: use physical addresses. */
		uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		uci = ucode_cpu_info;
		path = ucode_path;
		use_pa = false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);

	/* Needed in load_microcode_amd() */
	uci->cpu_sig.sig = cpuid_1_eax;

	*ret = cp;
}
  420. void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
  421. {
  422. struct cpio_data cp = { };
  423. __load_ucode_amd(cpuid_1_eax, &cp);
  424. if (!(cp.data && cp.size))
  425. return;
  426. apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
  427. }
/*
 * AP early-loading entry point: first try to apply the patch the BSP already
 * saved in amd_ucode_patch; if none was saved (or applying it fails), fall
 * back to scanning builtin/initrd microcode again.
 */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;
	u32 *new_rev, rev, dummy;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* 32-bit APs may run before paging: use physical addresses. */
		mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	} else {
		mc = (struct microcode_amd *)amd_ucode_patch;
		new_rev = &ucode_new_rev;
	}

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/*
	 * Check whether a new patch has been saved already. Also, allow application of
	 * the same revision in order to pick up SMT-thread-specific configuration even
	 * if the sibling SMT thread already has an up-to-date revision.
	 */
	if (*new_rev && rev <= mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			*new_rev = mc->hdr.patch_id;
			return;
		}
	}

	__load_ucode_amd(cpuid_1_eax, &cp);

	if (!(cp.data && cp.size))
		return;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
}
  457. static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
  458. int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
  459. {
  460. struct cont_desc desc = { 0 };
  461. enum ucode_state ret;
  462. struct cpio_data cp;
  463. cp = find_microcode_in_initrd(ucode_path, false);
  464. if (!(cp.data && cp.size))
  465. return -EINVAL;
  466. desc.cpuid_1_eax = cpuid_1_eax;
  467. scan_containers(cp.data, cp.size, &desc);
  468. if (!desc.mc)
  469. return -EINVAL;
  470. ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
  471. if (ret > UCODE_UPDATED)
  472. return -EINVAL;
  473. return 0;
  474. }
/*
 * Reapply @cpu's node-local saved patch, e.g. after resume. Only applies if
 * the saved patch revision is strictly newer than the running one.
 */
void reload_ucode_amd(unsigned int cpu)
{
	u32 rev, dummy __always_unused;
	struct microcode_amd *mc;

	/* Per-node blob saved by the early loader / load_microcode_amd(). */
	mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}
  488. static u16 __find_equiv_id(unsigned int cpu)
  489. {
  490. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  491. return find_equiv_id(&equiv_table, uci->cpu_sig.sig);
  492. }
  493. /*
  494. * a small, trivial cache of per-family ucode patches
  495. */
  496. static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
  497. {
  498. struct ucode_patch *p;
  499. list_for_each_entry(p, &microcode_cache, plist)
  500. if (p->equiv_cpu == equiv_cpu)
  501. return p;
  502. return NULL;
  503. }
/*
 * Insert @new_patch into the cache, keyed by equiv_cpu. If an entry for the
 * same equiv_cpu already exists, keep only the one with the higher patch_id
 * and free the other. Ownership of @new_patch passes to the cache either way.
 */
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			/* The new patch is newer: swap it in, free the old one. */
			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &microcode_cache);
}
/* Free every cached patch and its payload, emptying microcode_cache. */
static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		/* Unlink without list_del()'s poisoning; the node is freed next. */
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}
  533. static struct ucode_patch *find_patch(unsigned int cpu)
  534. {
  535. u16 equiv_id;
  536. equiv_id = __find_equiv_id(cpu);
  537. if (!equiv_id)
  538. return NULL;
  539. return cache_find_patch(equiv_id);
  540. }
  541. static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
  542. {
  543. struct cpuinfo_x86 *c = &cpu_data(cpu);
  544. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  545. struct ucode_patch *p;
  546. csig->sig = cpuid_eax(0x00000001);
  547. csig->rev = c->microcode;
  548. /*
  549. * a patch could have been loaded early, set uci->mc so that
  550. * mc_bp_resume() can call apply_microcode()
  551. */
  552. p = find_patch(cpu);
  553. if (p && (p->patch_id == csig->rev))
  554. uci->mc = p->data;
  555. pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
  556. return 0;
  557. }
/*
 * Late-loading callback: apply the best cached patch to @cpu (which must be
 * the CPU we are running on) and update per-CPU and global revision
 * bookkeeping.
 *
 * Returns:
 *  UCODE_NFOUND  - no cached patch for this CPU
 *  UCODE_OK      - running revision is already newer, nothing applied
 *  UCODE_ERROR   - the MSR-based update failed
 *  UCODE_UPDATED - patch applied successfully
 */
static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev, dummy __always_unused;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	mc_amd = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}
/*
 * Copy the equivalence table at the start of @buf (@buf_size bytes) into a
 * vmalloc'ed buffer and publish it via the global equiv_table.
 *
 * Returns the number of bytes consumed (container header plus table), or 0
 * if validation or allocation fails.
 */
static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size, false))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}
  613. static void free_equiv_cpu_table(void)
  614. {
  615. vfree(equiv_table.entry);
  616. memset(&equiv_table, 0, sizeof(equiv_table));
  617. }
/* Tear down all loader state: the equivalence table and the patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 *
 * On success (0), the patch has been copied into the cache and its size is
 * returned through @patch_size.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(family, fw, leftover, patch_size, false);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	/* Copy the patch payload (without its section header) for the cache. */
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}
	patch->size = *patch_size;

	mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		__func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return 0;
}
/*
 * Parse a complete container in @data (@size bytes) for @family: install the
 * equivalence table, then verify and cache each patch section.
 *
 * Returns UCODE_OK on success, UCODE_ERROR on malformed input or allocation
 * failure.
 */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw += offset;
	size -= offset;

	/* The first section after the equivalence table must be a patch. */
	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		/* Advance past the patch and its section header. */
		fw += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}
/*
 * (Re)build the patch cache from container @data and, for each NUMA node
 * whose representative CPU would get a newer patch, stash that patch in the
 * per-node amd_ucode_patch area.
 *
 * Returns UCODE_NEW if at least one node got a newer patch saved, UCODE_OK
 * if the cache was rebuilt but nothing newer was found, or an error state
 * from __load_microcode_amd().
 */
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
	unsigned int nid, cpu;
	struct ucode_patch *p;
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK) {
		cleanup();
		return ret;
	}

	for_each_node(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		/* Nothing to do if the running revision is already newer. */
		if (c->microcode >= p->patch_id)
			continue;

		ret = UCODE_NEW;

		/* Save this node's patch for early loading on its CPUs. */
		memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
		memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
	}

	return ret;
}
  716. /*
  717. * AMD microcode firmware naming convention, up to family 15h they are in
  718. * the legacy file:
  719. *
  720. * amd-ucode/microcode_amd.bin
  721. *
  722. * This legacy file is always smaller than 2K in size.
  723. *
  724. * Beginning with family 15h, they are in family-specific firmware files:
  725. *
  726. * amd-ucode/microcode_amd_fam15h.bin
  727. * amd-ucode/microcode_amd_fam16h.bin
  728. * ...
  729. *
  730. * These might be larger than 2K.
  731. */
  732. static enum ucode_state request_microcode_amd(int cpu, struct device *device,
  733. bool refresh_fw)
  734. {
  735. char fw_name[36] = "amd-ucode/microcode_amd.bin";
  736. struct cpuinfo_x86 *c = &cpu_data(cpu);
  737. enum ucode_state ret = UCODE_NFOUND;
  738. const struct firmware *fw;
  739. /* reload ucode container only on the boot cpu */
  740. if (!refresh_fw)
  741. return UCODE_OK;
  742. if (c->x86 >= 0x15)
  743. snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
  744. if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
  745. pr_debug("failed to load file %s\n", fw_name);
  746. goto out;
  747. }
  748. ret = UCODE_ERROR;
  749. if (!verify_container(fw->data, fw->size, false))
  750. goto fw_release;
  751. ret = load_microcode_amd(c->x86, fw->data, fw->size);
  752. fw_release:
  753. release_firmware(fw);
  754. out:
  755. return ret;
  756. }
  757. static void microcode_fini_cpu_amd(int cpu)
  758. {
  759. struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
  760. uci->mc = NULL;
  761. }
/* Late-loading driver callbacks, handed to the core by init_amd_microcode(). */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_fw = request_microcode_amd,
	.collect_cpu_info = collect_cpu_info_amd,
	.apply_microcode = apply_microcode_amd,
	.microcode_fini_cpu = microcode_fini_cpu_amd,
};
  768. struct microcode_ops * __init init_amd_microcode(void)
  769. {
  770. struct cpuinfo_x86 *c = &boot_cpu_data;
  771. if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
  772. pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
  773. return NULL;
  774. }
  775. if (ucode_new_rev)
  776. pr_info_once("microcode updated early to new patch_level=0x%08x\n",
  777. ucode_new_rev);
  778. return &microcode_amd_ops;
  779. }
/* Module exit: free the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}