// SPDX-License-Identifier: GPL-2.0
/*
 * main.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/driver-api/firmware/ for more information.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel_read_file.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
#include <linux/zstd.h>
#include <linux/xz.h>

#include <generated/utsrelease.h>

#include "../base.h"
#include "firmware.h"
#include "fallback.h"

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_FW_CACHE
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct fw_cache_entry {
	struct list_head list;
	const char *name;
};

struct fw_name_devm {
	unsigned long magic;
	const char *name;
};

static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}

#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
DEFINE_MUTEX(fw_lock);

struct firmware_cache fw_cache;

void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}

static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}

static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);

static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf,
					  size_t size,
					  size_t offset,
					  u32 opt_flags)
{
	struct fw_priv *fw_priv;

	/* For a partial read, the buffer must be preallocated. */
	if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
		return NULL;

	/* Only partial reads are allowed to use an offset. */
	if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
		return NULL;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
	if (!fw_priv)
		return NULL;

	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!fw_priv->fw_name) {
		kfree(fw_priv);
		return NULL;
	}

	kref_init(&fw_priv->ref);
	fw_priv->fwc = fwc;
	fw_priv->data = dbuf;
	fw_priv->allocated_size = size;
	fw_priv->offset = offset;
	fw_priv->opt_flags = opt_flags;
	fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&fw_priv->pending_list);
#endif

	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);

	return fw_priv;
}

static struct fw_priv *__lookup_fw_priv(const char *fw_name)
{
	struct fw_priv *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_name, fw_name))
			return tmp;
	return NULL;
}

/* Returns 1 for batching firmware requests with the same name */
int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
			 struct fw_priv **fw_priv, void *dbuf, size_t size,
			 size_t offset, u32 opt_flags)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	/*
	 * Do not merge requests that are marked to be non-cached or
	 * are performing partial reads.
	 */
	if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
		tmp = __lookup_fw_priv(fw_name);
		if (tmp) {
			kref_get(&tmp->ref);
			spin_unlock(&fwc->lock);
			*fw_priv = tmp;
			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
			return 1;
		}
	}

	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

	if (fw_is_paged_buf(fw_priv))
		fw_free_paged_buf(fw_priv);
	else if (!fw_priv->allocated_size)
		vfree(fw_priv->data);

	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}

void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;

	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}

#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool fw_is_paged_buf(struct fw_priv *fw_priv)
{
	return fw_priv->is_paged_buf;
}

void fw_free_paged_buf(struct fw_priv *fw_priv)
{
	int i;

	if (!fw_priv->pages)
		return;

	vunmap(fw_priv->data);

	for (i = 0; i < fw_priv->nr_pages; i++)
		__free_page(fw_priv->pages[i]);
	kvfree(fw_priv->pages);
	fw_priv->pages = NULL;
	fw_priv->page_array_size = 0;
	fw_priv->nr_pages = 0;
	fw_priv->data = NULL;
	fw_priv->size = 0;
}

int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
{
	/* If the array of pages is too small, grow it */
	if (fw_priv->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
					   GFP_KERNEL);
		if (!new_pages)
			return -ENOMEM;
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		kvfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages])
			return -ENOMEM;
		fw_priv->nr_pages++;
	}

	return 0;
}

int fw_map_paged_buf(struct fw_priv *fw_priv)
{
	/* a paged buffer should be mapped/unmapped only once */
	if (!fw_priv->pages)
		return 0;

	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;

	return 0;
}
#endif
/*
 * ZSTD-compressed firmware support
 */
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
			      size_t in_size, const void *in_buffer)
{
	size_t len, out_size, workspace_size;
	void *workspace, *out_buf;
	zstd_dctx *ctx;
	int err;

	if (fw_priv->allocated_size) {
		out_size = fw_priv->allocated_size;
		out_buf = fw_priv->data;
	} else {
		zstd_frame_header params;

		if (zstd_get_frame_header(&params, in_buffer, in_size) ||
		    params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
			dev_dbg(dev, "%s: invalid zstd header\n", __func__);
			return -EINVAL;
		}
		out_size = params.frameContentSize;
		out_buf = vzalloc(out_size);
		if (!out_buf)
			return -ENOMEM;
	}

	workspace_size = zstd_dctx_workspace_bound();
	workspace = kvzalloc(workspace_size, GFP_KERNEL);
	if (!workspace) {
		err = -ENOMEM;
		goto error;
	}

	ctx = zstd_init_dctx(workspace, workspace_size);
	if (!ctx) {
		dev_dbg(dev, "%s: failed to initialize context\n", __func__);
		err = -EINVAL;
		goto error;
	}

	len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
	if (zstd_is_error(len)) {
		dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
			zstd_get_error_code(len));
		err = -EINVAL;
		goto error;
	}

	if (!fw_priv->allocated_size)
		fw_priv->data = out_buf;
	fw_priv->size = len;
	err = 0;

error:
	kvfree(workspace);
	if (err && !fw_priv->allocated_size)
		vfree(out_buf);
	return err;
}
#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
/*
 * XZ-compressed firmware support
 */
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
/* show an error and return the standard error code */
static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
{
	if (xz_ret != XZ_STREAM_END) {
		dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
		return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
	}
	return 0;
}

/* single-shot decompression onto the pre-allocated buffer */
static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
				   size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;

	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;
	xz_buf.out_size = fw_priv->allocated_size;
	xz_buf.out = fw_priv->data;
	xz_buf.out_pos = 0;

	xz_ret = xz_dec_run(xz_dec, &xz_buf);
	xz_dec_end(xz_dec);

	fw_priv->size = xz_buf.out_pos;
	return fw_decompress_xz_error(dev, xz_ret);
}

/* decompression on paged buffer and map it */
static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
				  size_t in_size, const void *in_buffer)
{
	struct xz_dec *xz_dec;
	struct xz_buf xz_buf;
	enum xz_ret xz_ret;
	struct page *page;
	int err = 0;

	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
	if (!xz_dec)
		return -ENOMEM;

	xz_buf.in_size = in_size;
	xz_buf.in = in_buffer;
	xz_buf.in_pos = 0;

	fw_priv->is_paged_buf = true;
	fw_priv->size = 0;
	do {
		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
			err = -ENOMEM;
			goto out;
		}

		/* decompress onto the newly allocated page */
		page = fw_priv->pages[fw_priv->nr_pages - 1];
		xz_buf.out = kmap_local_page(page);
		xz_buf.out_pos = 0;
		xz_buf.out_size = PAGE_SIZE;
		xz_ret = xz_dec_run(xz_dec, &xz_buf);
		kunmap_local(xz_buf.out);
		fw_priv->size += xz_buf.out_pos;
		/* partial decompression means either end or error */
		if (xz_buf.out_pos != PAGE_SIZE)
			break;
	} while (xz_ret == XZ_OK);

	err = fw_decompress_xz_error(dev, xz_ret);
	if (!err)
		err = fw_map_paged_buf(fw_priv);

out:
	xz_dec_end(xz_dec);
	return err;
}

static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
			    size_t in_size, const void *in_buffer)
{
	/* if the buffer is pre-allocated, we can decompress in single-shot mode */
	if (fw_priv->data)
		return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
	else
		return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
}
#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
/* direct firmware loading support */
#define CUSTOM_FW_PATH_COUNT	10
#define PATH_SIZE		255
static char fw_path_para[CUSTOM_FW_PATH_COUNT][PATH_SIZE];
static const char * const fw_path[] = {
	fw_path_para[0],
	fw_path_para[1],
	fw_path_para[2],
	fw_path_para[3],
	fw_path_para[4],
	fw_path_para[5],
	fw_path_para[6],
	fw_path_para[7],
	fw_path_para[8],
	fw_path_para[9],
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

static char strpath[PATH_SIZE * CUSTOM_FW_PATH_COUNT];
static int firmware_param_path_set(const char *val, const struct kernel_param *kp)
{
	int i;
	char *path, *end;

	strscpy(strpath, val, sizeof(strpath));
	/* Remove leading and trailing spaces from path */
	path = strim(strpath);
	for (i = 0; path && i < CUSTOM_FW_PATH_COUNT; i++) {
		end = strchr(path, ',');

		/* Skip consecutive delimiters, for example ',,,' */
		if (end == path) {
			i--;
			path = ++end;
			continue;
		}

		if (end != NULL)
			*end = '\0';
		else {
			/* end of the string reached and no further ',' tokens */
			strscpy(fw_path_para[i], path, PATH_SIZE);
			break;
		}

		strscpy(fw_path_para[i], path, PATH_SIZE);
		path = ++end;
	}

	return 0;
}
static int firmware_param_path_get(char *buffer, const struct kernel_param *kp)
{
	int count = 0, i;

	for (i = 0; i < CUSTOM_FW_PATH_COUNT; i++)
		if (strlen(fw_path_para[i]) != 0)
			count += sysfs_emit_at(buffer, count, "%s,", fw_path_para[i]);

	return count;
}

/*
 * Typical usage is passing 'firmware_class.path=/vendor,/firmware_mnt'
 * on the kernel command line, because firmware_class is generally built
 * into the kernel rather than as a module. ',' is used as the delimiter
 * for setting up to 10 custom paths for the firmware loader.
 */
static const struct kernel_param_ops firmware_param_ops = {
	.set = firmware_param_path_set,
	.get = firmware_param_path_get,
};
module_param_cb(path, &firmware_param_ops, NULL, 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
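
/*
 * Illustrative sketch (not part of the loader itself): the custom search
 * paths can be set either at boot via the command line or at runtime via
 * the module parameter file. The directories used below are assumptions
 * for the example.
 *
 *	# on the kernel command line:
 *	firmware_class.path=/vendor/firmware,/mnt/firmware
 *
 *	# or at runtime:
 *	echo -n /vendor/firmware,/mnt/firmware \
 *		> /sys/module/firmware_class/parameters/path
 */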
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
			   const char *suffix,
			   int (*decompress)(struct device *dev,
					     struct fw_priv *fw_priv,
					     size_t in_size,
					     const void *in_buffer))
{
	size_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	size_t msize = INT_MAX;
	void *buffer = NULL;

	/* Already populated data member means we're loading into a buffer */
	if (!decompress && fw_priv->data) {
		buffer = fw_priv->data;
		msize = fw_priv->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	wait_for_initramfs();
	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		size_t file_size = 0;
		size_t *file_size_ptr = NULL;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s%s",
			       fw_path[i], fw_priv->fw_name, suffix);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		fw_priv->size = 0;

		/*
		 * The total file size is only examined when doing a partial
		 * read; the "full read" case needs to fail if the whole
		 * firmware was not completely loaded.
		 */
		if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
			file_size_ptr = &file_size;

		/* load firmware files from the mount namespace of init */
		rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
						       &buffer, msize,
						       file_size_ptr,
						       READING_FIRMWARE);
		if (rc < 0) {
			if (rc != -ENOENT)
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			else
				dev_dbg(device, "loading %s failed for no such file or directory.\n",
					path);
			continue;
		}
		size = rc;
		rc = 0;

		dev_dbg(device, "Loading firmware from %s\n", path);
		if (decompress) {
			dev_dbg(device, "f/w decompressing %s\n",
				fw_priv->fw_name);
			rc = decompress(device, fw_priv, size, buffer);
			/* discard the superfluous original content */
			vfree(buffer);
			buffer = NULL;
			if (rc) {
				fw_free_paged_buf(fw_priv);
				continue;
			}
		} else {
			dev_dbg(device, "direct-loading %s\n",
				fw_priv->fw_name);
			if (!fw_priv->data)
				fw_priv->data = buffer;
			fw_priv->size = size;
		}
		fw_state_done(fw_priv);
		break;
	}
	__putname(path);

	return rc;
}
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	free_fw_priv(fw->priv);
}

/* store the pages buffer info into the firmware struct from fw_priv's buf */
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
	fw->priv = fw_priv;
	fw->size = fw_priv->size;
	fw->data = fw_priv->data;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);
}
#ifdef CONFIG_FW_CACHE
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
	kfree_const(fwn->name);
}

static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}

static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}

static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return true;

	return false;
}

/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	if (fw_cache_is_setup(dev, name))
		return 0;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);
	return 0;
}
#else
static bool fw_cache_is_setup(struct device *dev, const char *name)
{
	return false;
}

static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif
int assign_fw(struct firmware *fw, struct device *device)
{
	struct fw_priv *fw_priv = fw->priv;
	int ret;

	mutex_lock(&fw_lock);
	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
	    !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
		ret = fw_add_devm_name(device, fw_priv->fw_name);
		if (ret) {
			mutex_unlock(&fw_lock);
			return ret;
		}
	}

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
	    fw_priv->fwc->state == FW_LOADER_START_CACHE)
		fw_cache_piggyback_on_request(fw_priv);

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(fw_priv, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size,
			  size_t offset, u32 opt_flags)
{
	struct firmware *firmware;
	struct fw_priv *fw_priv;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
				   offset, opt_flags);

	/*
	 * bind with 'priv' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = fw_priv;

	if (ret > 0) {
		ret = fw_state_wait(fw_priv);
		if (!ret) {
			fw_set_page_data(fw_priv, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}

/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct fw_priv and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct fw_priv *fw_priv;

	/* Loaded directly? */
	if (!fw || !fw->priv)
		return;

	fw_priv = fw->priv;
	mutex_lock(&fw_lock);
	if (!fw_state_is_aborted(fw_priv))
		fw_state_aborted(fw_priv);
	mutex_unlock(&fw_lock);
}

/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  size_t offset, u32 opt_flags)
{
	struct firmware *fw = NULL;
	struct cred *kern_cred = NULL;
	const struct cred *old_cred;
	bool nondirect = false;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size,
					offset, opt_flags);
	if (ret <= 0) /* error or already assigned */
		goto out;

	/*
	 * We are about to try to access the firmware file. Because we may have been
	 * called by a driver when serving an unrelated request from userland, we use
	 * the kernel credentials to read the file.
	 */
	kern_cred = prepare_kernel_cred(NULL);
	if (!kern_cred) {
		ret = -ENOMEM;
		goto out;
	}
	old_cred = override_creds(kern_cred);

	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);

	/* Only full reads can support decompression, platform, and sysfs. */
	if (!(opt_flags & FW_OPT_PARTIAL))
		nondirect = true;

#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
	if (ret == -ENOENT && nondirect)
		ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
						 fw_decompress_zstd);
#endif
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
	if (ret == -ENOENT && nondirect)
		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
						 fw_decompress_xz);
#endif
	if (ret == -ENOENT && nondirect)
		ret = firmware_fallback_platform(fw->priv);

	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		if (nondirect)
			ret = firmware_fallback_sysfs(fw, name, device,
						      opt_flags, ret);
	} else
		ret = assign_fw(fw, device);

	revert_creds(old_cred);
	put_cred(kern_cred);

out:
	if (ret < 0) {
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
/**
 * request_firmware() - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * @firmware_p will be used to return a firmware image by the name
 * of @name for device @device.
 *
 * Should be called from user context where sleeping is allowed.
 *
 * @name will be used as $FIRMWARE in the uevent environment and
 * should be distinctive enough not to be confused with any other
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
 *
 * The function can be called safely inside a device's suspend and
 * resume callbacks.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
				FW_OPT_UEVENT);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
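
/*
 * Example (an illustrative sketch, not part of this file): a hypothetical
 * driver fetching and consuming its firmware at probe time. The firmware
 * name "example/fw.bin" and the example_hw_upload() helper are assumptions.
 *
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware(&fw, "example/fw.bin", &pdev->dev);
 *	if (err)
 *		return err;
 *	err = example_hw_upload(pdev, fw->data, fw->size);
 *	release_firmware(fw);
 *	return err;
 */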
/**
 * firmware_request_nowarn() - request for an optional fw module
 * @firmware: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function is similar in behaviour to request_firmware(), except it
 * doesn't produce warning messages when the file is not found. The sysfs
 * fallback mechanism is enabled if direct filesystem lookup fails. However,
 * failures to find the firmware file with it are still suppressed. It is
 * therefore up to the driver to check for the return value of this call and to
 * decide when to inform the users of errors.
 **/
int firmware_request_nowarn(const struct firmware **firmware, const char *name,
			    struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_nowarn);
/**
 * request_firmware_direct() - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * fall back to the usermode helper even if the firmware couldn't be loaded
 * directly from the filesystem. Hence it's useful for loading optional
 * firmware images, which aren't always present, without udev's extra long
 * timeouts.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN |
				FW_OPT_NOFALLBACK_SYSFS);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);

/**
 * firmware_request_platform() - request firmware with platform-fw fallback
 * @firmware: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function is similar in behaviour to request_firmware(), except that if
 * direct filesystem lookup fails, it will fall back to looking for a copy of
 * the requested firmware embedded in the platform's main (e.g. UEFI) firmware.
 **/
int firmware_request_platform(const struct firmware **firmware,
			      const char *name, struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
				FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_platform);

/**
 * firmware_request_cache() - cache firmware for suspend so resume can use it
 * @device: device for which firmware should be cached
 * @name: name of firmware file
 *
 * There are some devices with an optimization that enables the device to not
 * require loading firmware on system reboot. This optimization may still
 * require the firmware present on resume from suspend. This routine can be
 * used to ensure the firmware is present on resume from suspend in these
 * situations. This helper is not compatible with drivers which use
 * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
 **/
int firmware_request_cache(struct device *device, const char *name)
{
	int ret;

	mutex_lock(&fw_lock);
	ret = fw_add_devm_name(device, name);
	mutex_unlock(&fw_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(firmware_request_cache);
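
/*
 * Example (an illustrative sketch): a driver that only needs its firmware
 * again on resume can ask for the image to be cached at probe time instead
 * of holding a copy itself. The firmware name is an assumption.
 *
 *	err = firmware_request_cache(&pdev->dev, "example/fw.bin");
 *	if (err)
 *		dev_warn(&pdev->dev, "firmware will not be cached for resume\n");
 */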
/**
 * request_firmware_into_buf() - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	if (fw_cache_is_setup(device, name))
		return -EOPNOTSUPP;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size, 0,
				FW_OPT_UEVENT | FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
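
/*
 * Example (an illustrative sketch): loading firmware into a preallocated,
 * device-visible buffer. The buffer size and names are assumptions. Note
 * that fw->data points at the caller's buffer here, and the contents of
 * @buf remain valid after release_firmware() since the loader does not
 * own a preallocated buffer.
 *
 *	const struct firmware *fw;
 *	void *buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	err = request_firmware_into_buf(&fw, "example/fw.bin", dev, buf, SZ_64K);
 *	if (!err)
 *		release_firmware(fw);
 */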
/**
 * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 * @offset: offset into file to read
 *
 * This function works pretty much like request_firmware_into_buf except
 * it allows a partial read of the file.
 */
int
request_partial_firmware_into_buf(const struct firmware **firmware_p,
				  const char *name, struct device *device,
				  void *buf, size_t size, size_t offset)
{
	int ret;

	if (fw_cache_is_setup(device, name))
		return -EOPNOTSUPP;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size, offset,
				FW_OPT_UEVENT | FW_OPT_NOCACHE |
				FW_OPT_PARTIAL);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_partial_firmware_into_buf);

/**
 * release_firmware() - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!firmware_is_builtin(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);

/* Async support */
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	u32 opt_flags;
};

static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
			  fw_work->opt_flags);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}

/**
 * request_firmware_nowait() - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 * Caller must hold the reference count of @device.
 *
 * Asynchronous variant of request_firmware() for user contexts:
 *	- sleep for as small periods as possible since it may
 *	  increase kernel boot time of built-in device drivers
 *	  requesting firmware in their ->probe() methods, if
 *	  @gfp is GFP_KERNEL.
 *
 *	- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = kstrdup_const(name, gfp);
	if (!fw_work->name) {
		kfree(fw_work);
		return -ENOMEM;
	}
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->opt_flags = FW_OPT_NOWAIT |
		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);

	if (!uevent && fw_cache_is_setup(device, name)) {
		kfree_const(fw_work->name);
		kfree(fw_work);
		return -EOPNOTSUPP;
	}

	if (!try_module_get(module)) {
		kfree_const(fw_work->name);
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
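
/*
 * Example (an illustrative sketch): requesting firmware asynchronously from
 * probe so the driver does not block boot. The callback, device struct and
 * firmware name are assumptions; @fw is NULL in the callback if the request
 * failed.
 *
 *	static void example_fw_cont(const struct firmware *fw, void *context)
 *	{
 *		struct example_dev *edev = context;
 *
 *		if (!fw)
 *			return;
 *		example_hw_upload(edev, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 *	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
 *				      "example/fw.bin", &pdev->dev,
 *				      GFP_KERNEL, edev, example_fw_cont);
 */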
#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);

/**
 * cache_firmware() - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when the
 * system isn't ready for them to request firmware images from userspace.
 * Once it returns successfully, drivers can use request_firmware() or its
 * nowait version to get the cached firmware without interacting with
 * userspace.
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
static struct fw_priv *lookup_fw_priv(const char *fw_name)
{
	struct fw_priv *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __lookup_fw_priv(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}

/**
 * uncache_firmware() - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct fw_priv *fw_priv;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (firmware_request_builtin(&fw, fw_name))
		return 0;

	fw_priv = lookup_fw_priv(fw_name);
	if (fw_priv) {
		free_fw_priv(fw_priv);
		return 0;
	}

	return -EINVAL;
}

static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
	if (!fce)
		goto exit;

	fce->name = kstrdup_const(name, GFP_ATOMIC);
	if (!fce->name) {
		kfree(fce);
		fce = NULL;
		goto exit;
	}
exit:
	return fce;
}

static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}

static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
	const char *name = fw_priv->fw_name;
	struct firmware_cache *fwc = fw_priv->fwc;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		list_add(&fce->list, &fwc->fw_names);
		kref_get(&fw_priv->ref);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
}

static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}

static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}

/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)match_data);
}

static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}

static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
/**
 * device_cache_fw_images() - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmware images for the device,
 * then the device driver can load its firmware easily at a time
 * when the system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	DEFINE_WAIT(wait);

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);
	fw_fallback_set_cache_timeout();

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	fw_fallback_set_default_timeout();
}

/**
 * device_uncache_fw_images() - uncache devices' firmware
 *
 * uncache all firmware images which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay() - uncache devices' firmware after a delay
 * @delay: number of milliseconds to delay uncaching device firmware
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case that system sleep failed and syscore_suspend is
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}

/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};

static int __init register_fw_pm_ops(void)
{
	int ret;

	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	ret = register_pm_notifier(&fw_cache.pm_notify);
	if (ret)
		return ret;

	register_syscore_ops(&fw_syscore_ops);

	return ret;
}

static inline void unregister_fw_pm_ops(void)
{
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
}
static inline int register_fw_pm_ops(void)
{
	return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
#endif

static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;
}

static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/*
	 * Kill all pending fallback requests to avoid both stalling shutdown,
	 * and avoid a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};

static int __init firmware_class_init(void)
{
	int ret;

	/* No need to unfold these on exit */
	fw_cache_init();

	ret = register_fw_pm_ops();
	if (ret)
		return ret;

	ret = register_reboot_notifier(&fw_shutdown_nb);
	if (ret)
		goto out;

	return register_sysfs_loader();

out:
	unregister_fw_pm_ops();
	return ret;
}

static void __exit firmware_class_exit(void)
{
	unregister_fw_pm_ops();
	unregister_reboot_notifier(&fw_shutdown_nb);
	unregister_sysfs_loader();
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);