test_firmware.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This module provides an interface to trigger and test firmware loading.
 *
 * It is designed to be used for basic evaluation of the firmware loading
 * subsystem (for example when validating firmware verification). It lacks
 * any extra dependencies, and will not normally be loaded by the system
 * unless explicitly requested by name.
 */
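
/*
 * Typical usage, as a rough sketch only (the sysfs directory below assumes
 * the usual layout for misc devices; the attribute and device names come
 * from this file). The firmware selftests drive these knobs in a similar
 * way:
 *
 *      modprobe test_firmware
 *      cd /sys/devices/virtual/misc/test_firmware
 *      echo -n "test-firmware.bin" > trigger_request
 *      cat /dev/test_firmware
 */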

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/efi_embedded_fw.h>

MODULE_IMPORT_NS(TEST_FIRMWARE);

#define TEST_FIRMWARE_NAME      "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS  4
#define TEST_FIRMWARE_BUF_SIZE  SZ_1K
#define TEST_UPLOAD_MAX_SIZE    SZ_2K
#define TEST_UPLOAD_BLK_SIZE    37      /* Avoid powers of two in testing */

static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
static LIST_HEAD(test_upload_list);

struct test_batched_req {
        u8 idx;
        int rc;
        bool sent;
        const struct firmware *fw;
        const char *name;
        const char *fw_buf;
        struct completion completion;
        struct task_struct *task;
        struct device *dev;
};

/**
 * struct test_config - represents the configuration for the different test
 *      triggers
 *
 * @name: the name of the firmware file to look for
 * @into_buf: if true, the sync trigger uses request_firmware_into_buf()
 *      instead of request_firmware()
 * @buf_size: size of the buffer to allocate when @into_buf is true
 * @file_offset: file offset to request when calling request_firmware_into_buf()
 * @partial: partial read option when calling request_firmware_into_buf()
 * @sync_direct: if true, the sync trigger uses request_firmware_direct()
 *      instead of request_firmware()
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *      specific.
 * @reqs: stores all requests information
 * @read_fw_idx: index of the request whose firmware we want to read back
 *      through the read_firmware trigger.
 * @upload_name: firmware name to be used with the upload_read sysfs node
 * @test_result: a test may use this to collect the result from the
 *      request_firmware*() calls used in its tests. In order of priority we
 *      always keep first any setup error. If no setup errors were found
 *      then we move on to the first error encountered while running the
 *      API. Note that for async calls this typically will be a successful
 *      result (0) unless of course you've used bogus parameters, or the
 *      system is out of memory. In the async case the callback is expected
 *      to do a bit more homework to figure out what happened; unfortunately
 *      the only information passed today on error is the fact that no
 *      firmware was found, so we can only assume -ENOENT on async calls if
 *      the firmware is NULL.
 *
 *      Errors you can expect:
 *
 *      API specific:
 *
 *      0: success for sync, for async it means the request was sent
 *      -EINVAL: invalid parameters or request
 *      -ENOENT: file not found
 *
 *      System environment:
 *
 *      -ENOMEM: memory pressure on system
 *      -ENODEV: out of number of devices to test
 *      -EINVAL: an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *      request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
        char *name;
        bool into_buf;
        size_t buf_size;
        size_t file_offset;
        bool partial;
        bool sync_direct;
        bool send_uevent;
        u8 num_requests;
        u8 read_fw_idx;
        char *upload_name;

        /*
         * These below don't belong here, but we'll move them once we create
         * a struct fw_test_device and stuff the misc_dev under there later.
         */
        struct test_batched_req *reqs;
        int test_result;
        int (*req_firmware)(const struct firmware **fw, const char *name,
                            struct device *device);
};
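
/*
 * A minimal configuration sketch (attribute names match the DEVICE_ATTR_*
 * definitions below; the sysfs directory is assumed to be the test_firmware
 * misc device's, as in the header comment above):
 *
 *      echo 2 > config_num_requests
 *      echo -n "test-firmware.bin" > config_name
 *      echo 1 > config_sync_direct
 *      echo 1 > trigger_batched_requests
 *      cat test_result
 *      echo 1 > release_all_firmware
 */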

struct upload_inject_err {
        const char *prog;
        enum fw_upload_err err_code;
};

struct test_firmware_upload {
        char *name;
        struct list_head node;
        char *buf;
        size_t size;
        bool cancel_request;
        struct upload_inject_err inject;
        struct fw_upload *fwl;
};

static struct test_config *test_fw_config;

static struct test_firmware_upload *upload_lookup_name(const char *name)
{
        struct test_firmware_upload *tst;

        list_for_each_entry(tst, &test_upload_list, node)
                if (strncmp(name, tst->name, strlen(tst->name)) == 0)
                        return tst;

        return NULL;
}

static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
                                 size_t size, loff_t *offset)
{
        ssize_t rc = 0;

        mutex_lock(&test_fw_mutex);
        if (test_firmware)
                rc = simple_read_from_buffer(buf, size, offset,
                                             test_firmware->data,
                                             test_firmware->size);
        mutex_unlock(&test_fw_mutex);

        return rc;
}

static const struct file_operations test_fw_fops = {
        .owner = THIS_MODULE,
        .read  = test_fw_misc_read,
};
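
/*
 * After a successful sync or async trigger, the most recently loaded image
 * can be read back through the misc character device above. For example
 * (assuming the usual udev naming for misc devices and that the image was
 * installed under the standard firmware search path):
 *
 *      cmp /dev/test_firmware /lib/firmware/test-firmware.bin
 */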

static void __test_release_all_firmware(void)
{
        struct test_batched_req *req;
        u8 i;

        if (!test_fw_config->reqs)
                return;

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (req->fw) {
                        if (req->fw_buf) {
                                kfree_const(req->fw_buf);
                                req->fw_buf = NULL;
                        }
                        release_firmware(req->fw);
                        req->fw = NULL;
                }
        }

        vfree(test_fw_config->reqs);
        test_fw_config->reqs = NULL;
}

static void test_release_all_firmware(void)
{
        mutex_lock(&test_fw_mutex);
        __test_release_all_firmware();
        mutex_unlock(&test_fw_mutex);
}

static void __test_firmware_config_free(void)
{
        __test_release_all_firmware();
        kfree_const(test_fw_config->name);
        test_fw_config->name = NULL;
}

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
        *dst = kstrndup(name, count, gfp);
        if (!*dst)
                return -ENOMEM;
        return count;
}

static int __test_firmware_config_init(void)
{
        int ret;

        ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
                         strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
        if (ret < 0)
                goto out;

        test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
        test_fw_config->send_uevent = true;
        test_fw_config->into_buf = false;
        test_fw_config->buf_size = TEST_FIRMWARE_BUF_SIZE;
        test_fw_config->file_offset = 0;
        test_fw_config->partial = false;
        test_fw_config->sync_direct = false;
        test_fw_config->req_firmware = request_firmware;
        test_fw_config->test_result = 0;
        test_fw_config->reqs = NULL;
        test_fw_config->upload_name = NULL;

        return 0;

out:
        __test_firmware_config_free();
        return ret;
}

static ssize_t reset_store(struct device *dev,
                           struct device_attribute *attr,
                           const char *buf, size_t count)
{
        int ret;

        mutex_lock(&test_fw_mutex);

        __test_firmware_config_free();

        ret = __test_firmware_config_init();
        if (ret < 0) {
                ret = -ENOMEM;
                pr_err("could not alloc settings for config trigger: %d\n",
                       ret);
                goto out;
        }

        pr_info("reset\n");
        ret = count;

out:
        mutex_unlock(&test_fw_mutex);

        return ret;
}
static DEVICE_ATTR_WO(reset);

static ssize_t config_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        int len = 0;

        mutex_lock(&test_fw_mutex);

        len += scnprintf(buf, PAGE_SIZE - len,
                         "Custom trigger configuration for: %s\n",
                         dev_name(dev));

        if (test_fw_config->name)
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "name:\t%s\n",
                                 test_fw_config->name);
        else
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "name:\tEMPTY\n");

        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "num_requests:\t%u\n", test_fw_config->num_requests);

        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "send_uevent:\t\t%s\n",
                         test_fw_config->send_uevent ?
                         "FW_ACTION_UEVENT" :
                         "FW_ACTION_NOUEVENT");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "into_buf:\t\t%s\n",
                         test_fw_config->into_buf ? "true" : "false");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "buf_size:\t%zu\n", test_fw_config->buf_size);
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "file_offset:\t%zu\n", test_fw_config->file_offset);
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "partial:\t\t%s\n",
                         test_fw_config->partial ? "true" : "false");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "sync_direct:\t\t%s\n",
                         test_fw_config->sync_direct ? "true" : "false");
        len += scnprintf(buf + len, PAGE_SIZE - len,
                         "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
        if (test_fw_config->upload_name)
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "upload_name:\t%s\n",
                                 test_fw_config->upload_name);
        else
                len += scnprintf(buf + len, PAGE_SIZE - len,
                                 "upload_name:\tEMPTY\n");

        mutex_unlock(&test_fw_mutex);

        return len;
}
static DEVICE_ATTR_RO(config);

static ssize_t config_name_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        int ret;

        mutex_lock(&test_fw_mutex);
        kfree_const(test_fw_config->name);
        ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
        mutex_unlock(&test_fw_mutex);

        return ret;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(char *dst,
                                    char *src)
{
        int len;

        mutex_lock(&test_fw_mutex);
        len = snprintf(dst, PAGE_SIZE, "%s\n", src);
        mutex_unlock(&test_fw_mutex);

        return len;
}

static inline int __test_dev_config_update_bool(const char *buf, size_t size,
                                                bool *cfg)
{
        int ret;

        if (kstrtobool(buf, cfg) < 0)
                ret = -EINVAL;
        else
                ret = size;

        return ret;
}

static int test_dev_config_update_bool(const char *buf, size_t size,
                                       bool *cfg)
{
        int ret;

        mutex_lock(&test_fw_mutex);
        ret = __test_dev_config_update_bool(buf, size, cfg);
        mutex_unlock(&test_fw_mutex);

        return ret;
}

static ssize_t test_dev_config_show_bool(char *buf, bool val)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int __test_dev_config_update_size_t(const char *buf,
                                           size_t size,
                                           size_t *cfg)
{
        int ret;
        long new;

        ret = kstrtol(buf, 10, &new);
        if (ret)
                return ret;

        *(size_t *)cfg = new;

        /* Always return full write size even if we didn't consume all */
        return size;
}

static ssize_t test_dev_config_show_size_t(char *buf, size_t val)
{
        return snprintf(buf, PAGE_SIZE, "%zu\n", val);
}

static ssize_t test_dev_config_show_int(char *buf, int val)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
        u8 val;
        int ret;

        ret = kstrtou8(buf, 10, &val);
        if (ret)
                return ret;

        *(u8 *)cfg = val;

        /* Always return full write size even if we didn't consume all */
        return size;
}

static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
        int ret;

        mutex_lock(&test_fw_mutex);
        ret = __test_dev_config_update_u8(buf, size, cfg);
        mutex_unlock(&test_fw_mutex);

        return ret;
}

static ssize_t test_dev_config_show_u8(char *buf, u8 val)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t config_name_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);

static ssize_t config_upload_name_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct test_firmware_upload *tst;
        int ret = count;

        mutex_lock(&test_fw_mutex);
        tst = upload_lookup_name(buf);
        if (tst)
                test_fw_config->upload_name = tst->name;
        else
                ret = -EINVAL;
        mutex_unlock(&test_fw_mutex);

        return ret;
}

static ssize_t config_upload_name_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return config_test_show_str(buf, test_fw_config->upload_name);
}
static DEVICE_ATTR_RW(config_upload_name);

static ssize_t config_num_requests_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int rc;

        mutex_lock(&test_fw_mutex);
        if (test_fw_config->reqs) {
                pr_err("Must call release_all_firmware prior to changing config\n");
                rc = -EINVAL;
                mutex_unlock(&test_fw_mutex);
                goto out;
        }

        rc = __test_dev_config_update_u8(buf, count,
                                         &test_fw_config->num_requests);
        mutex_unlock(&test_fw_mutex);

out:
        return rc;
}

static ssize_t config_num_requests_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);

static ssize_t config_into_buf_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        return test_dev_config_update_bool(buf,
                                           count,
                                           &test_fw_config->into_buf);
}

static ssize_t config_into_buf_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        return test_dev_config_show_bool(buf, test_fw_config->into_buf);
}
static DEVICE_ATTR_RW(config_into_buf);

static ssize_t config_buf_size_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        int rc;

        mutex_lock(&test_fw_mutex);
        if (test_fw_config->reqs) {
                pr_err("Must call release_all_firmware prior to changing config\n");
                rc = -EINVAL;
                mutex_unlock(&test_fw_mutex);
                goto out;
        }

        rc = __test_dev_config_update_size_t(buf, count,
                                             &test_fw_config->buf_size);
        mutex_unlock(&test_fw_mutex);

out:
        return rc;
}

static ssize_t config_buf_size_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        return test_dev_config_show_size_t(buf, test_fw_config->buf_size);
}
static DEVICE_ATTR_RW(config_buf_size);

static ssize_t config_file_offset_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        int rc;

        mutex_lock(&test_fw_mutex);
        if (test_fw_config->reqs) {
                pr_err("Must call release_all_firmware prior to changing config\n");
                rc = -EINVAL;
                mutex_unlock(&test_fw_mutex);
                goto out;
        }

        rc = __test_dev_config_update_size_t(buf, count,
                                             &test_fw_config->file_offset);
        mutex_unlock(&test_fw_mutex);

out:
        return rc;
}

static ssize_t config_file_offset_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_size_t(buf, test_fw_config->file_offset);
}
static DEVICE_ATTR_RW(config_file_offset);

static ssize_t config_partial_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        return test_dev_config_update_bool(buf,
                                           count,
                                           &test_fw_config->partial);
}

static ssize_t config_partial_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return test_dev_config_show_bool(buf, test_fw_config->partial);
}
static DEVICE_ATTR_RW(config_partial);

static ssize_t config_sync_direct_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        int rc = test_dev_config_update_bool(buf, count,
                                             &test_fw_config->sync_direct);

        if (rc == count)
                test_fw_config->req_firmware = test_fw_config->sync_direct ?
                                       request_firmware_direct :
                                       request_firmware;
        return rc;
}

static ssize_t config_sync_direct_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);

static ssize_t config_send_uevent_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        return test_dev_config_update_bool(buf, count,
                                           &test_fw_config->send_uevent);
}

static ssize_t config_send_uevent_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);

static ssize_t config_read_fw_idx_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        return test_dev_config_update_u8(buf, count,
                                         &test_fw_config->read_fw_idx);
}

static ssize_t config_read_fw_idx_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);

static ssize_t trigger_request_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        int rc;
        char *name;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        pr_info("loading '%s'\n", name);

        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        if (test_fw_config->reqs)
                __test_release_all_firmware();
        test_firmware = NULL;
        rc = request_firmware(&test_firmware, name, dev);
        if (rc) {
                pr_info("load of '%s' failed: %d\n", name, rc);
                goto out;
        }
        pr_info("loaded: %zu\n", test_firmware->size);
        rc = count;

out:
        mutex_unlock(&test_fw_mutex);

        kfree(name);

        return rc;
}
static DEVICE_ATTR_WO(trigger_request);

#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
extern struct list_head efi_embedded_fw_list;
extern bool efi_embedded_fw_checked;

static ssize_t trigger_request_platform_store(struct device *dev,
                                              struct device_attribute *attr,
                                              const char *buf, size_t count)
{
        static const u8 test_data[] = {
                0x55, 0xaa, 0x55, 0xaa, 0x01, 0x02, 0x03, 0x04,
                0x55, 0xaa, 0x55, 0xaa, 0x05, 0x06, 0x07, 0x08,
                0x55, 0xaa, 0x55, 0xaa, 0x10, 0x20, 0x30, 0x40,
                0x55, 0xaa, 0x55, 0xaa, 0x50, 0x60, 0x70, 0x80
        };
        struct efi_embedded_fw efi_embedded_fw;
        const struct firmware *firmware = NULL;
        bool saved_efi_embedded_fw_checked;
        char *name;
        int rc;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        pr_info("inserting test platform fw '%s'\n", name);
        efi_embedded_fw.name = name;
        efi_embedded_fw.data = (void *)test_data;
        efi_embedded_fw.length = sizeof(test_data);
        list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
        saved_efi_embedded_fw_checked = efi_embedded_fw_checked;
        efi_embedded_fw_checked = true;

        pr_info("loading '%s'\n", name);
        rc = firmware_request_platform(&firmware, name, dev);
        if (rc) {
                pr_info("load of '%s' failed: %d\n", name, rc);
                goto out;
        }
        if (firmware->size != sizeof(test_data) ||
            memcmp(firmware->data, test_data, sizeof(test_data)) != 0) {
                pr_info("firmware contents mismatch for '%s'\n", name);
                rc = -EINVAL;
                goto out;
        }
        pr_info("loaded: %zu\n", firmware->size);
        rc = count;

out:
        efi_embedded_fw_checked = saved_efi_embedded_fw_checked;
        release_firmware(firmware);
        list_del(&efi_embedded_fw.list);
        kfree(name);

        return rc;
}
static DEVICE_ATTR_WO(trigger_request_platform);
#endif

static DECLARE_COMPLETION(async_fw_done);

static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
        test_firmware = fw;
        complete(&async_fw_done);
}

static ssize_t trigger_async_request_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        int rc;
        char *name;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        pr_info("loading '%s'\n", name);

        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        test_firmware = NULL;
        if (test_fw_config->reqs)
                __test_release_all_firmware();
        rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
                                     NULL, trigger_async_request_cb);
        if (rc) {
                pr_info("async load of '%s' failed: %d\n", name, rc);
                kfree(name);
                goto out;
        }
        /* Free 'name' ASAP, to test for race conditions */
        kfree(name);

        wait_for_completion(&async_fw_done);

        if (test_firmware) {
                pr_info("loaded: %zu\n", test_firmware->size);
                rc = count;
        } else {
                pr_err("failed to async load firmware\n");
                rc = -ENOMEM;
        }

out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);

static ssize_t trigger_custom_fallback_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
{
        int rc;
        char *name;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        pr_info("loading '%s' using custom fallback mechanism\n", name);

        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        if (test_fw_config->reqs)
                __test_release_all_firmware();
        test_firmware = NULL;
        rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
                                     dev, GFP_KERNEL, NULL,
                                     trigger_async_request_cb);
        if (rc) {
                pr_info("async load of '%s' failed: %d\n", name, rc);
                kfree(name);
                goto out;
        }
        /* Free 'name' ASAP, to test for race conditions */
        kfree(name);

        wait_for_completion(&async_fw_done);

        if (test_firmware) {
                pr_info("loaded: %zu\n", test_firmware->size);
                rc = count;
        } else {
                pr_err("failed to async load firmware\n");
                rc = -ENODEV;
        }

out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);

static int test_fw_run_batch_request(void *data)
{
        struct test_batched_req *req = data;

        if (!req) {
                test_fw_config->test_result = -EINVAL;
                return -EINVAL;
        }

        if (test_fw_config->into_buf) {
                void *test_buf;

                test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
                if (!test_buf)
                        return -ENOMEM;

                if (test_fw_config->partial)
                        req->rc = request_partial_firmware_into_buf
                                                (&req->fw,
                                                 req->name,
                                                 req->dev,
                                                 test_buf,
                                                 test_fw_config->buf_size,
                                                 test_fw_config->file_offset);
                else
                        req->rc = request_firmware_into_buf
                                                (&req->fw,
                                                 req->name,
                                                 req->dev,
                                                 test_buf,
                                                 test_fw_config->buf_size);
                if (!req->fw)
                        kfree(test_buf);
                else
                        req->fw_buf = test_buf;
        } else {
                req->rc = test_fw_config->req_firmware(&req->fw,
                                                       req->name,
                                                       req->dev);
        }

        if (req->rc) {
                pr_info("#%u: batched sync load failed: %d\n",
                        req->idx, req->rc);
                if (!test_fw_config->test_result)
                        test_fw_config->test_result = req->rc;
        } else if (req->fw) {
                req->sent = true;
                pr_info("#%u: batched sync loaded %zu\n",
                        req->idx, req->fw->size);
        }
        complete(&req->completion);

        req->task = NULL;

        return 0;
}

/*
 * We use a kthread as otherwise the kernel serializes all our sync requests
 * and we would not be able to mimic batched requests on a sync call. Batched
 * requests on a sync call can for instance happen on a device driver when
 * multiple cards are used and firmware loading happens outside of probe.
 */
static ssize_t trigger_batched_requests_store(struct device *dev,
                                              struct device_attribute *attr,
                                              const char *buf, size_t count)
{
        struct test_batched_req *req;
        int rc;
        u8 i;

        mutex_lock(&test_fw_mutex);

        if (test_fw_config->reqs) {
                rc = -EBUSY;
                goto out_bail;
        }

        test_fw_config->reqs =
                vzalloc(array3_size(sizeof(struct test_batched_req),
                                    test_fw_config->num_requests, 2));
        if (!test_fw_config->reqs) {
                rc = -ENOMEM;
                goto out_unlock;
        }

        pr_info("batched sync firmware loading '%s' %u times\n",
                test_fw_config->name, test_fw_config->num_requests);

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                req->fw = NULL;
                req->idx = i;
                req->name = test_fw_config->name;
                req->fw_buf = NULL;
                req->dev = dev;
                init_completion(&req->completion);
                req->task = kthread_run(test_fw_run_batch_request, req,
                                        "%s-%u", KBUILD_MODNAME, req->idx);
                if (!req->task || IS_ERR(req->task)) {
                        pr_err("Setting up thread %u failed\n", req->idx);
                        req->task = NULL;
                        rc = -ENOMEM;
                        goto out_bail;
                }
        }

        rc = count;

        /*
         * We require an explicit release to enable more time and delay of
         * calling release_firmware() to improve our chances of forcing a
         * batched request. If we instead called release_firmware() right away
         * then we might miss on an opportunity of having a successful firmware
         * request pass on the opportunity to become a batched request.
         */

out_bail:
        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (req->task || req->sent)
                        wait_for_completion(&req->completion);
        }

        /* Override any worker error if we had a general setup error */
        if (rc < 0)
                test_fw_config->test_result = rc;

out_unlock:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);
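
/*
 * A rough sketch of how the batched trigger is typically exercised from
 * userspace (attribute names come from this file; the sysfs directory is
 * assumed to be the test_firmware misc device's):
 *
 *      echo 4 > config_num_requests
 *      echo 1 > trigger_batched_requests
 *      cat test_result
 *      echo 2 > config_read_fw_idx
 *      cat read_firmware > /tmp/fw-copy
 *      echo 1 > release_all_firmware
 */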

/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
        struct test_batched_req *req = context;

        if (!req) {
                test_fw_config->test_result = -EINVAL;
                return;
        }

        /* forces *some* batched requests to queue up */
        if (!req->idx)
                ssleep(2);

        req->fw = fw;

        /*
         * Unfortunately the firmware API gives us nothing other than a null FW
         * if the firmware was not found on async requests. Best we can do is
         * just assume -ENOENT. A better API would pass the actual return
         * value to the callback.
         */
        if (!fw && !test_fw_config->test_result)
                test_fw_config->test_result = -ENOENT;

        complete(&req->completion);
}

static
ssize_t trigger_batched_requests_async_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
{
        struct test_batched_req *req;
        bool send_uevent;
        int rc;
        u8 i;

        mutex_lock(&test_fw_mutex);

        if (test_fw_config->reqs) {
                rc = -EBUSY;
                goto out_bail;
        }

        test_fw_config->reqs =
                vzalloc(array3_size(sizeof(struct test_batched_req),
                                    test_fw_config->num_requests, 2));
        if (!test_fw_config->reqs) {
                rc = -ENOMEM;
                goto out;
        }

        pr_info("batched loading '%s' custom fallback mechanism %u times\n",
                test_fw_config->name, test_fw_config->num_requests);

        send_uevent = test_fw_config->send_uevent ? FW_ACTION_UEVENT :
                FW_ACTION_NOUEVENT;

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                req->name = test_fw_config->name;
                req->fw_buf = NULL;
                req->fw = NULL;
                req->idx = i;
                init_completion(&req->completion);
                rc = request_firmware_nowait(THIS_MODULE, send_uevent,
                                             req->name,
                                             dev, GFP_KERNEL, req,
                                             trigger_batched_cb);
                if (rc) {
                        pr_info("#%u: batched async load failed setup: %d\n",
                                i, rc);
                        req->rc = rc;
                        goto out_bail;
                } else
                        req->sent = true;
        }

        rc = count;

out_bail:

        /*
         * We require an explicit release to enable more time and delay of
         * calling release_firmware() to improve our chances of forcing a
         * batched request. If we instead called release_firmware() right away
         * then we might miss on an opportunity of having a successful firmware
         * request pass on the opportunity to become a batched request.
         */

        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                if (req->sent)
                        wait_for_completion(&req->completion);
        }

        /* Override any worker error if we had a general setup error */
        if (rc < 0)
                test_fw_config->test_result = rc;

out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static void upload_release(struct test_firmware_upload *tst)
{
        firmware_upload_unregister(tst->fwl);
        kfree(tst->buf);
        kfree(tst->name);
        kfree(tst);
}

static void upload_release_all(void)
{
        struct test_firmware_upload *tst, *tmp;

        list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
                list_del(&tst->node);
                upload_release(tst);
        }
        test_fw_config->upload_name = NULL;
}

/*
 * This table is replicated from .../firmware_loader/sysfs_upload.c
 * and needs to be kept in sync.
 */
static const char * const fw_upload_err_str[] = {
        [FW_UPLOAD_ERR_NONE]         = "none",
        [FW_UPLOAD_ERR_HW_ERROR]     = "hw-error",
        [FW_UPLOAD_ERR_TIMEOUT]      = "timeout",
        [FW_UPLOAD_ERR_CANCELED]     = "user-abort",
        [FW_UPLOAD_ERR_BUSY]         = "device-busy",
        [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
        [FW_UPLOAD_ERR_RW_ERROR]     = "read-write-error",
        [FW_UPLOAD_ERR_WEAROUT]      = "flash-wearout",
};

static void upload_err_inject_error(struct test_firmware_upload *tst,
                                    const u8 *p, const char *prog)
{
        enum fw_upload_err err;

        for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) {
                if (strncmp(p, fw_upload_err_str[err],
                            strlen(fw_upload_err_str[err])) == 0) {
                        tst->inject.prog = prog;
                        tst->inject.err_code = err;
                        return;
                }
        }
}

static void upload_err_inject_prog(struct test_firmware_upload *tst,
                                   const u8 *p)
{
        static const char * const progs[] = {
                "preparing:", "transferring:", "programming:"
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(progs); i++) {
                if (strncmp(p, progs[i], strlen(progs[i])) == 0) {
                        upload_err_inject_error(tst, p + strlen(progs[i]),
                                                progs[i]);
                        return;
                }
        }
}
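
/*
 * Error injection sketch: an uploaded image whose payload starts with
 * "inject:<phase><error>" arms a fault for that phase, where <phase> is one
 * of the progs[] strings above (trailing colon included) and <error> is one
 * of the fw_upload_err_str[] names. For example, an image beginning with:
 *
 *      inject:programming:hw-error
 *
 * makes the fake device report FW_UPLOAD_ERR_HW_ERROR from its programming
 * (poll_complete) step.
 */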

#define FIVE_MINUTES_MS (5 * 60 * 1000)
static enum fw_upload_err
fw_upload_wait_on_cancel(struct test_firmware_upload *tst)
{
        int ms_delay;

        for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) {
                msleep(100);
                if (tst->cancel_request)
                        return FW_UPLOAD_ERR_CANCELED;
        }
        return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl,
                                                 const u8 *data, u32 size)
{
        struct test_firmware_upload *tst = fwl->dd_handle;
        enum fw_upload_err ret = FW_UPLOAD_ERR_NONE;
        const char *progress = "preparing:";

        tst->cancel_request = false;

        if (!size || size > TEST_UPLOAD_MAX_SIZE) {
                ret = FW_UPLOAD_ERR_INVALID_SIZE;
                goto err_out;
        }

        if (strncmp(data, "inject:", strlen("inject:")) == 0)
                upload_err_inject_prog(tst, data + strlen("inject:"));

        memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE);
        tst->size = size;

        if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
            strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
                return FW_UPLOAD_ERR_NONE;

        if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
                ret = fw_upload_wait_on_cancel(tst);
        else
                ret = tst->inject.err_code;

err_out:
        /*
         * The cleanup op only executes if the prepare op succeeds.
         * If the prepare op fails, it must do its own clean-up.
         */
        tst->inject.err_code = FW_UPLOAD_ERR_NONE;
        tst->inject.prog = NULL;

        return ret;
}

static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl,
                                               const u8 *data, u32 offset,
                                               u32 size, u32 *written)
{
        struct test_firmware_upload *tst = fwl->dd_handle;
        const char *progress = "transferring:";
        u32 blk_size;

        if (tst->cancel_request)
                return FW_UPLOAD_ERR_CANCELED;

        blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size);
        memcpy(tst->buf + offset, data + offset, blk_size);

        *written = blk_size;

        if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
            strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
                return FW_UPLOAD_ERR_NONE;

        if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
                return fw_upload_wait_on_cancel(tst);

        return tst->inject.err_code;
}

static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl)
{
        struct test_firmware_upload *tst = fwl->dd_handle;
        const char *progress = "programming:";

        if (tst->cancel_request)
                return FW_UPLOAD_ERR_CANCELED;

        if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
            strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
                return FW_UPLOAD_ERR_NONE;

        if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
                return fw_upload_wait_on_cancel(tst);

        return tst->inject.err_code;
}

static void test_fw_upload_cancel(struct fw_upload *fwl)
{
        struct test_firmware_upload *tst = fwl->dd_handle;

        tst->cancel_request = true;
}

static void test_fw_cleanup(struct fw_upload *fwl)
{
        struct test_firmware_upload *tst = fwl->dd_handle;

        tst->inject.err_code = FW_UPLOAD_ERR_NONE;
        tst->inject.prog = NULL;
}

static const struct fw_upload_ops upload_test_ops = {
        .prepare = test_fw_upload_prepare,
        .write = test_fw_upload_write,
        .poll_complete = test_fw_upload_complete,
        .cancel = test_fw_upload_cancel,
        .cleanup = test_fw_cleanup
};

static ssize_t upload_register_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct test_firmware_upload *tst;
        struct fw_upload *fwl;
        char *name;
        int ret;

        name = kstrndup(buf, count, GFP_KERNEL);
        if (!name)
                return -ENOMEM;

        mutex_lock(&test_fw_mutex);
        tst = upload_lookup_name(name);
        if (tst) {
                ret = -EEXIST;
                goto free_name;
        }

        tst = kzalloc(sizeof(*tst), GFP_KERNEL);
        if (!tst) {
                ret = -ENOMEM;
                goto free_name;
        }

        tst->name = name;
        tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL);
        if (!tst->buf) {
                ret = -ENOMEM;
                goto free_tst;
        }

        fwl = firmware_upload_register(THIS_MODULE, dev, tst->name,
                                       &upload_test_ops, tst);
        if (IS_ERR(fwl)) {
                ret = PTR_ERR(fwl);
                goto free_buf;
        }

        tst->fwl = fwl;
        list_add_tail(&tst->node, &test_upload_list);
        mutex_unlock(&test_fw_mutex);
        return count;

free_buf:
        kfree(tst->buf);
free_tst:
        kfree(tst);
free_name:
        mutex_unlock(&test_fw_mutex);
        kfree(name);

        return ret;
}
static DEVICE_ATTR_WO(upload_register);
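
/*
 * A hedged sketch of the intended upload flow: registering a name here
 * creates a firmware upload device which, with the standard firmware upload
 * sysfs interface, is expected to show up under /sys/class/firmware/<name>/.
 * Something along these lines should exercise it, with config_upload_name
 * and upload_read (below) used to verify what the fake device received:
 *
 *      echo -n fake-dev > upload_register
 *      echo 1 > /sys/class/firmware/fake-dev/loading
 *      cat image.bin > /sys/class/firmware/fake-dev/data
 *      echo 0 > /sys/class/firmware/fake-dev/loading
 *      echo -n fake-dev > config_upload_name
 *      cat upload_read | cmp - image.bin
 */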

static ssize_t upload_unregister_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        struct test_firmware_upload *tst;
        int ret = count;

        mutex_lock(&test_fw_mutex);
        tst = upload_lookup_name(buf);
        if (!tst) {
                ret = -EINVAL;
                goto out;
        }

        if (test_fw_config->upload_name == tst->name)
                test_fw_config->upload_name = NULL;

        list_del(&tst->node);
        upload_release(tst);

out:
        mutex_unlock(&test_fw_mutex);
        return ret;
}
static DEVICE_ATTR_WO(upload_unregister);

static ssize_t test_result_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        test_release_all_firmware();
        return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

static ssize_t read_firmware_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct test_batched_req *req;
        u8 idx;
        ssize_t rc = 0;

        mutex_lock(&test_fw_mutex);

        idx = test_fw_config->read_fw_idx;
        if (idx >= test_fw_config->num_requests) {
                rc = -ERANGE;
                goto out;
        }

        if (!test_fw_config->reqs) {
                rc = -EINVAL;
                goto out;
        }

        req = &test_fw_config->reqs[idx];
        if (!req->fw) {
                pr_err("#%u: failed to async load firmware\n", idx);
                rc = -ENOENT;
                goto out;
        }

        pr_info("#%u: loaded %zu\n", idx, req->fw->size);

        if (req->fw->size > PAGE_SIZE) {
                pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
                rc = -EINVAL;
                goto out;
        }

        memcpy(buf, req->fw->data, req->fw->size);

        rc = req->fw->size;
out:
        mutex_unlock(&test_fw_mutex);

        return rc;
}
static DEVICE_ATTR_RO(read_firmware);

static ssize_t upload_read_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct test_firmware_upload *tst = NULL;
        struct test_firmware_upload *tst_iter;
        int ret = -EINVAL;

        if (!test_fw_config->upload_name) {
                pr_err("Set config_upload_name before using upload_read\n");
                return -EINVAL;
        }

        mutex_lock(&test_fw_mutex);
        list_for_each_entry(tst_iter, &test_upload_list, node)
                if (tst_iter->name == test_fw_config->upload_name) {
                        tst = tst_iter;
                        break;
                }

        if (!tst) {
                pr_err("Firmware name not found: %s\n",
                       test_fw_config->upload_name);
                goto out;
        }

        if (tst->size > PAGE_SIZE) {
                pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
                goto out;
        }

        memcpy(buf, tst->buf, tst->size);
        ret = tst->size;
out:
        mutex_unlock(&test_fw_mutex);
        return ret;
}
static DEVICE_ATTR_RO(upload_read);

#define TEST_FW_DEV_ATTR(name)  &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
        TEST_FW_DEV_ATTR(reset),

        TEST_FW_DEV_ATTR(config),
        TEST_FW_DEV_ATTR(config_name),
        TEST_FW_DEV_ATTR(config_num_requests),
        TEST_FW_DEV_ATTR(config_into_buf),
        TEST_FW_DEV_ATTR(config_buf_size),
        TEST_FW_DEV_ATTR(config_file_offset),
        TEST_FW_DEV_ATTR(config_partial),
        TEST_FW_DEV_ATTR(config_sync_direct),
        TEST_FW_DEV_ATTR(config_send_uevent),
        TEST_FW_DEV_ATTR(config_read_fw_idx),
        TEST_FW_DEV_ATTR(config_upload_name),

        /* These don't use the config at all - they could be ported! */
        TEST_FW_DEV_ATTR(trigger_request),
        TEST_FW_DEV_ATTR(trigger_async_request),
        TEST_FW_DEV_ATTR(trigger_custom_fallback),
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
        TEST_FW_DEV_ATTR(trigger_request_platform),
#endif

        /* These use the config and can use the test_result */
        TEST_FW_DEV_ATTR(trigger_batched_requests),
        TEST_FW_DEV_ATTR(trigger_batched_requests_async),

        TEST_FW_DEV_ATTR(release_all_firmware),
        TEST_FW_DEV_ATTR(test_result),
        TEST_FW_DEV_ATTR(read_firmware),
        TEST_FW_DEV_ATTR(upload_read),
        TEST_FW_DEV_ATTR(upload_register),
        TEST_FW_DEV_ATTR(upload_unregister),
        NULL,
};
ATTRIBUTE_GROUPS(test_dev);

static struct miscdevice test_fw_misc_device = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "test_firmware",
        .fops   = &test_fw_fops,
        .groups = test_dev_groups,
};

static int __init test_firmware_init(void)
{
        int rc;

        test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
        if (!test_fw_config)
                return -ENOMEM;

        rc = __test_firmware_config_init();
        if (rc) {
                kfree(test_fw_config);
                pr_err("could not init firmware test config: %d\n", rc);
                return rc;
        }

        rc = misc_register(&test_fw_misc_device);
        if (rc) {
                __test_firmware_config_free();
                kfree(test_fw_config);
                pr_err("could not register misc device: %d\n", rc);
                return rc;
        }

        pr_warn("interface ready\n");

        return 0;
}

module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        misc_deregister(&test_fw_misc_device);
        upload_release_all();
        __test_firmware_config_free();
        kfree(test_fw_config);
        mutex_unlock(&test_fw_mutex);

        pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);

MODULE_AUTHOR("Kees Cook <[email protected]>");
MODULE_LICENSE("GPL");