nvm.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * NVM helpers
  4. *
  5. * Copyright (C) 2020, Intel Corporation
  6. * Author: Mika Westerberg <[email protected]>
  7. */
  8. #include <linux/idr.h>
  9. #include <linux/slab.h>
  10. #include <linux/vmalloc.h>
  11. #include "tb.h"
  12. /* Intel specific NVM offsets */
  13. #define INTEL_NVM_DEVID 0x05
  14. #define INTEL_NVM_VERSION 0x08
  15. #define INTEL_NVM_CSS 0x10
  16. #define INTEL_NVM_FLASH_SIZE 0x45
  17. /* ASMedia specific NVM offsets */
  18. #define ASMEDIA_NVM_DATE 0x1c
  19. #define ASMEDIA_NVM_VERSION 0x28
  20. static DEFINE_IDA(nvm_ida);
/**
 * struct tb_nvm_vendor_ops - Vendor specific NVM operations
 * @read_version: Reads out NVM version from the flash
 * @validate: Validates the NVM image before update (optional)
 * @write_headers: Writes headers before the rest of the image (optional)
 *
 * All callbacks take the &struct tb_nvm being operated on and return
 * %0 on success or a negative errno on failure.
 */
struct tb_nvm_vendor_ops {
	int (*read_version)(struct tb_nvm *nvm);
	int (*validate)(struct tb_nvm *nvm);
	int (*write_headers)(struct tb_nvm *nvm);
};
/**
 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
 * @vendor: Vendor ID
 * @vops: Vendor specific NVM operations
 *
 * Maps vendor ID to NVM vendor operations. If there is no mapping then
 * NVM firmware upgrade is disabled for the device.
 */
struct tb_nvm_vendor {
	u16 vendor;
	const struct tb_nvm_vendor_ops *vops;
};
  44. static int intel_switch_nvm_version(struct tb_nvm *nvm)
  45. {
  46. struct tb_switch *sw = tb_to_switch(nvm->dev);
  47. u32 val, nvm_size, hdr_size;
  48. int ret;
  49. /*
  50. * If the switch is in safe-mode the only accessible portion of
  51. * the NVM is the non-active one where userspace is expected to
  52. * write new functional NVM.
  53. */
  54. if (sw->safe_mode)
  55. return 0;
  56. ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
  57. if (ret)
  58. return ret;
  59. hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
  60. nvm_size = (SZ_1M << (val & 7)) / 8;
  61. nvm_size = (nvm_size - hdr_size) / 2;
  62. ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
  63. if (ret)
  64. return ret;
  65. nvm->major = (val >> 16) & 0xff;
  66. nvm->minor = (val >> 8) & 0xff;
  67. nvm->active_size = nvm_size;
  68. return 0;
  69. }
/*
 * Validate a new Intel router NVM image held in nvm->buf and skip its
 * headers so only the digital section is flashed.
 *
 * NOTE(review): header fields are read via direct pointer casts into
 * the image buffer — this assumes a little-endian CPU and that the
 * on-flash layout is little-endian; confirm for new platforms.
 */
static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	unsigned int image_size, hdr_size;
	u16 ds_size, device_id;
	u8 *buf = nvm->buf;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/* In safe-mode there is no config space to compare against */
	if (sw->safe_mode)
		return 0;

	/*
	 * Make sure the device ID in the image matches the one
	 * we read from the switch config space.
	 */
	device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device_id != sw->config.device_id)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}
  108. static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
  109. {
  110. struct tb_switch *sw = tb_to_switch(nvm->dev);
  111. if (sw->generation < 3) {
  112. int ret;
  113. /* Write CSS headers first */
  114. ret = dma_port_flash_write(sw->dma_port,
  115. DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
  116. DMA_PORT_CSS_MAX_SIZE);
  117. if (ret)
  118. return ret;
  119. }
  120. return 0;
  121. }
/* NVM operations for Intel routers */
static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
	.read_version = intel_switch_nvm_version,
	.validate = intel_switch_nvm_validate,
	.write_headers = intel_switch_nvm_write_headers,
};
  127. static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
  128. {
  129. struct tb_switch *sw = tb_to_switch(nvm->dev);
  130. u32 val;
  131. int ret;
  132. ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
  133. if (ret)
  134. return ret;
  135. nvm->major = (val << 16) & 0xff0000;
  136. nvm->major |= val & 0x00ff00;
  137. nvm->major |= (val >> 16) & 0x0000ff;
  138. ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
  139. if (ret)
  140. return ret;
  141. nvm->minor = (val << 16) & 0xff0000;
  142. nvm->minor |= val & 0x00ff00;
  143. nvm->minor |= (val >> 16) & 0x0000ff;
  144. /* ASMedia NVM size is fixed to 512k */
  145. nvm->active_size = SZ_512K;
  146. return 0;
  147. }
/* NVM operations for ASMedia routers (version read only; no validation) */
static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
	.read_version = asmedia_switch_nvm_version,
};
/* Router vendor NVM support table */
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
	{ 0x174c, &asmedia_switch_nvm_ops },	/* ASMedia */
	{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
	/* presumably Intel's non-PCI vendor ID used by some routers */
	{ 0x8087, &intel_switch_nvm_ops },
};
  157. static int intel_retimer_nvm_version(struct tb_nvm *nvm)
  158. {
  159. struct tb_retimer *rt = tb_to_retimer(nvm->dev);
  160. u32 val, nvm_size;
  161. int ret;
  162. ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
  163. if (ret)
  164. return ret;
  165. nvm->major = (val >> 16) & 0xff;
  166. nvm->minor = (val >> 8) & 0xff;
  167. ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
  168. if (ret)
  169. return ret;
  170. nvm_size = (SZ_1M << (val & 7)) / 8;
  171. nvm_size = (nvm_size - SZ_16K) / 2;
  172. nvm->active_size = nvm_size;
  173. return 0;
  174. }
/*
 * Validate a new Intel retimer NVM image held in nvm->buf and skip its
 * headers so only the digital section is flashed. Mirrors
 * intel_switch_nvm_validate() but compares against the retimer device ID.
 *
 * NOTE(review): header fields are read via direct pointer casts into
 * the image buffer — assumes little-endian layout; confirm for new
 * platforms.
 */
static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	unsigned int image_size, hdr_size;
	u8 *buf = nvm->buf;
	u16 ds_size, device;

	image_size = nvm->buf_data_size;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	nvm->buf_data_start = buf + hdr_size;
	nvm->buf_data_size = image_size - hdr_size;

	return 0;
}
/* NVM operations for Intel retimers (no extra headers to write) */
static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
	.read_version = intel_retimer_nvm_version,
	.validate = intel_retimer_nvm_validate,
};
/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
	/* presumably Intel's non-PCI vendor ID — matches rt->vendor */
	{ 0x8087, &intel_retimer_nvm_ops },
};
  219. /**
  220. * tb_nvm_alloc() - Allocate new NVM structure
  221. * @dev: Device owning the NVM
  222. *
  223. * Allocates new NVM structure with unique @id and returns it. In case
  224. * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
  225. * NVM format of the @dev is not known by the kernel.
  226. */
  227. struct tb_nvm *tb_nvm_alloc(struct device *dev)
  228. {
  229. const struct tb_nvm_vendor_ops *vops = NULL;
  230. struct tb_nvm *nvm;
  231. int ret, i;
  232. if (tb_is_switch(dev)) {
  233. const struct tb_switch *sw = tb_to_switch(dev);
  234. for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
  235. const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];
  236. if (v->vendor == sw->config.vendor_id) {
  237. vops = v->vops;
  238. break;
  239. }
  240. }
  241. if (!vops) {
  242. tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
  243. sw->config.vendor_id);
  244. return ERR_PTR(-EOPNOTSUPP);
  245. }
  246. } else if (tb_is_retimer(dev)) {
  247. const struct tb_retimer *rt = tb_to_retimer(dev);
  248. for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
  249. const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];
  250. if (v->vendor == rt->vendor) {
  251. vops = v->vops;
  252. break;
  253. }
  254. }
  255. if (!vops) {
  256. dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
  257. rt->vendor);
  258. return ERR_PTR(-EOPNOTSUPP);
  259. }
  260. } else {
  261. return ERR_PTR(-EOPNOTSUPP);
  262. }
  263. nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
  264. if (!nvm)
  265. return ERR_PTR(-ENOMEM);
  266. ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
  267. if (ret < 0) {
  268. kfree(nvm);
  269. return ERR_PTR(ret);
  270. }
  271. nvm->id = ret;
  272. nvm->dev = dev;
  273. nvm->vops = vops;
  274. return nvm;
  275. }
  276. /**
  277. * tb_nvm_read_version() - Read and populate NVM version
  278. * @nvm: NVM structure
  279. *
  280. * Uses vendor specific means to read out and fill in the existing
  281. * active NVM version. Returns %0 in case of success and negative errno
  282. * otherwise.
  283. */
  284. int tb_nvm_read_version(struct tb_nvm *nvm)
  285. {
  286. const struct tb_nvm_vendor_ops *vops = nvm->vops;
  287. if (vops && vops->read_version)
  288. return vops->read_version(nvm);
  289. return -EOPNOTSUPP;
  290. }
  291. /**
  292. * tb_nvm_validate() - Validate new NVM image
  293. * @nvm: NVM structure
  294. *
  295. * Runs vendor specific validation over the new NVM image and if all
  296. * checks pass returns %0. As side effect updates @nvm->buf_data_start
  297. * and @nvm->buf_data_size fields to match the actual data to be written
  298. * to the NVM.
  299. *
  300. * If the validation does not pass then returns negative errno.
  301. */
  302. int tb_nvm_validate(struct tb_nvm *nvm)
  303. {
  304. const struct tb_nvm_vendor_ops *vops = nvm->vops;
  305. unsigned int image_size;
  306. u8 *buf = nvm->buf;
  307. if (!buf)
  308. return -EINVAL;
  309. if (!vops)
  310. return -EOPNOTSUPP;
  311. /* Just do basic image size checks */
  312. image_size = nvm->buf_data_size;
  313. if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
  314. return -EINVAL;
  315. /*
  316. * Set the default data start in the buffer. The validate method
  317. * below can change this if needed.
  318. */
  319. nvm->buf_data_start = buf;
  320. return vops->validate ? vops->validate(nvm) : 0;
  321. }
  322. /**
  323. * tb_nvm_write_headers() - Write headers before the rest of the image
  324. * @nvm: NVM structure
  325. *
  326. * If the vendor NVM format requires writing headers before the rest of
  327. * the image, this function does that. Can be called even if the device
  328. * does not need this.
  329. *
  330. * Returns %0 in case of success and negative errno otherwise.
  331. */
  332. int tb_nvm_write_headers(struct tb_nvm *nvm)
  333. {
  334. const struct tb_nvm_vendor_ops *vops = nvm->vops;
  335. return vops->write_headers ? vops->write_headers(nvm) : 0;
  336. }
  337. /**
  338. * tb_nvm_add_active() - Adds active NVMem device to NVM
  339. * @nvm: NVM structure
  340. * @reg_read: Pointer to the function to read the NVM (passed directly to the
  341. * NVMem device)
  342. *
  343. * Registers new active NVmem device for @nvm. The @reg_read is called
  344. * directly from NVMem so it must handle possible concurrent access if
  345. * needed. The first parameter passed to @reg_read is @nvm structure.
  346. * Returns %0 in success and negative errno otherwise.
  347. */
  348. int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
  349. {
  350. struct nvmem_config config;
  351. struct nvmem_device *nvmem;
  352. memset(&config, 0, sizeof(config));
  353. config.name = "nvm_active";
  354. config.reg_read = reg_read;
  355. config.read_only = true;
  356. config.id = nvm->id;
  357. config.stride = 4;
  358. config.word_size = 4;
  359. config.size = nvm->active_size;
  360. config.dev = nvm->dev;
  361. config.owner = THIS_MODULE;
  362. config.priv = nvm;
  363. nvmem = nvmem_register(&config);
  364. if (IS_ERR(nvmem))
  365. return PTR_ERR(nvmem);
  366. nvm->active = nvmem;
  367. return 0;
  368. }
  369. /**
  370. * tb_nvm_write_buf() - Write data to @nvm buffer
  371. * @nvm: NVM structure
  372. * @offset: Offset where to write the data
  373. * @val: Data buffer to write
  374. * @bytes: Number of bytes to write
  375. *
  376. * Helper function to cache the new NVM image before it is actually
  377. * written to the flash. Copies @bytes from @val to @nvm->buf starting
  378. * from @offset.
  379. */
  380. int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
  381. size_t bytes)
  382. {
  383. if (!nvm->buf) {
  384. nvm->buf = vmalloc(NVM_MAX_SIZE);
  385. if (!nvm->buf)
  386. return -ENOMEM;
  387. }
  388. nvm->flushed = false;
  389. nvm->buf_data_size = offset + bytes;
  390. memcpy(nvm->buf + offset, val, bytes);
  391. return 0;
  392. }
  393. /**
  394. * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
  395. * @nvm: NVM structure
  396. * @reg_write: Pointer to the function to write the NVM (passed directly
  397. * to the NVMem device)
  398. *
  399. * Registers new non-active NVmem device for @nvm. The @reg_write is called
  400. * directly from NVMem so it must handle possible concurrent access if
  401. * needed. The first parameter passed to @reg_write is @nvm structure.
  402. * The size of the NVMem device is set to %NVM_MAX_SIZE.
  403. *
  404. * Returns %0 in success and negative errno otherwise.
  405. */
  406. int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
  407. {
  408. struct nvmem_config config;
  409. struct nvmem_device *nvmem;
  410. memset(&config, 0, sizeof(config));
  411. config.name = "nvm_non_active";
  412. config.reg_write = reg_write;
  413. config.root_only = true;
  414. config.id = nvm->id;
  415. config.stride = 4;
  416. config.word_size = 4;
  417. config.size = NVM_MAX_SIZE;
  418. config.dev = nvm->dev;
  419. config.owner = THIS_MODULE;
  420. config.priv = nvm;
  421. nvmem = nvmem_register(&config);
  422. if (IS_ERR(nvmem))
  423. return PTR_ERR(nvmem);
  424. nvm->non_active = nvmem;
  425. return 0;
  426. }
  427. /**
  428. * tb_nvm_free() - Release NVM and its resources
  429. * @nvm: NVM structure to release
  430. *
  431. * Releases NVM and the NVMem devices if they were registered.
  432. */
  433. void tb_nvm_free(struct tb_nvm *nvm)
  434. {
  435. if (nvm) {
  436. nvmem_unregister(nvm->non_active);
  437. nvmem_unregister(nvm->active);
  438. vfree(nvm->buf);
  439. ida_simple_remove(&nvm_ida, nvm->id);
  440. }
  441. kfree(nvm);
  442. }
/**
 * tb_nvm_read_data() - Read data from NVM
 * @address: Start address on the flash
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if block read fails
 * @read_block: Function that reads block from the flash
 * @read_block_data: Data passed to @read_block
 *
 * This is a generic function that reads data from NVM or NVM like
 * device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
		     unsigned int retries, read_block_fn read_block,
		     void *read_block_data)
{
	do {
		unsigned int dwaddress, dwords, offset;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		/*
		 * Reads are done in dword (4-byte) units; @offset is
		 * how far into the first dword the requested address
		 * falls. Read size is capped to the temporary buffer.
		 */
		offset = address & 3;
		nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			/* Retry any error except device-gone (-ENODEV) */
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		/* Drop the leading bytes that were read only for alignment */
		nbytes -= offset;
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
/**
 * tb_nvm_write_data() - Write data to NVM
 * @address: Start address on the flash
 * @buf: Buffer where the data is copied from
 * @size: Size of the buffer in bytes
 * @retries: Number of retries if the block write fails
 * @write_block: Function that writes block to the flash
 * @write_block_data: Data passed to @write_block
 *
 * This is generic function that writes data to NVM or NVM like device.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
		      unsigned int retries, write_block_fn write_block,
		      void *write_block_data)
{
	do {
		unsigned int offset, dwaddress;
		u8 data[NVM_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		/*
		 * Writes are done in dword units; @offset is how far
		 * into the first dword the address falls.
		 *
		 * NOTE(review): when @offset != 0, @nbytes includes the
		 * offset, so the memcpy() below reads @offset bytes past
		 * the intended chunk of @buf and the advance at the end
		 * overshoots by @offset. Callers appear to always pass
		 * dword-aligned addresses — confirm before relying on
		 * unaligned writes.
		 */
		offset = address & 3;
		nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);

		memcpy(data + offset, buf, nbytes);

		dwaddress = address / 4;
		ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
		if (ret) {
			/* Only timeouts are retried; they become -EIO when exhausted */
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
/**
 * tb_nvm_exit() - Release NVM helper resources
 *
 * Destroys the IDA used to hand out unique NVM ids. Call once when the
 * NVM helpers are no longer needed.
 */
void tb_nvm_exit(void)
{
	ida_destroy(&nvm_ida);
}