flash.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <asm/rtas.h>

#include "cxl.h"
#include "hcalls.h"

#define DOWNLOAD_IMAGE 1
#define VALIDATE_IMAGE 2
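
/*
 * Header optionally prepended to the adapter image when userspace sets
 * CXL_AI_NEED_HEADER. Fields are stored big-endian; the structure must
 * be exactly CXL_AI_HEADER_SIZE bytes (checked in device_open()).
 */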
struct ai_header {
	u16 version;
	u8 reserved0[6];
	u16 vendor;
	u16 device;
	u16 subsystem_vendor;
	u16 subsystem;
	u64 image_offset;
	u64 image_length;
	u8 reserved1[96];
};

static struct semaphore sem;
static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;
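
/*
 * Work areas placed at the start of the RTAS data buffer for the
 * ibm,update-properties and ibm,update-nodes calls respectively.
 */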
struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

struct update_nodes_workarea {
	__be32 state;
	__be64 unit_address;
	__be32 reserved;
} __packed;

#define DEVICE_SCOPE 3

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff
#define OPCODE_DELETE		0x01000000
#define OPCODE_UPDATE		0x02000000
#define OPCODE_ADD		0x03000000
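
/*
 * Issue an RTAS call that uses the global RTAS data buffer as its work
 * area: copy the caller's buffer in, make the call and copy the results
 * back out, all under rtas_data_buf_lock.
 */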
static int rcall(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}
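
/*
 * Build a new property of 'vd' bytes from 'value' and hand it to
 * cxl_update_properties() to replace the existing property on the
 * device node. The allocations are freed here if the update fails.
 */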
static int update_property(struct device_node *dn, const char *name,
			   u32 vd, char *value)
{
	struct property *new_prop;
	u32 *val;
	int rc;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return -ENOMEM;

	new_prop->name = kstrdup(name, GFP_KERNEL);
	if (!new_prop->name) {
		kfree(new_prop);
		return -ENOMEM;
	}

	new_prop->length = vd;
	new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
	if (!new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop);
		return -ENOMEM;
	}
	memcpy(new_prop->value, value, vd);

	val = (u32 *)new_prop->value;
	rc = cxl_update_properties(dn, new_prop);
	pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
		 dn, name, vd, be32_to_cpu(*val));

	if (rc) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
	}
	return rc;
}
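
/*
 * Fetch the updated properties for the node identified by 'phandle'
 * via ibm,update-properties and apply each of them to the device tree.
 * The RTAS call is repeated while it returns 1 (more data to fetch).
 */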
static int update_node(__be32 phandle, s32 scope)
{
	struct update_props_workarea *upwa;
	struct device_node *dn;
	int i, rc, ret;
	char *prop_data;
	char *buf;
	int token;
	u32 nprops;
	u32 vd;

	token = rtas_token("ibm,update-properties");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
	if (!dn) {
		kfree(buf);
		return -ENOENT;
	}

	upwa = (struct update_props_workarea *)&buf[0];
	upwa->phandle = phandle;
	do {
		rc = rcall(token, buf, scope);
		if (rc < 0)
			break;

		prop_data = buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);
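
		/* A leading descriptor with an empty property name is
		 * skipped: it carries a 32-bit value length followed by
		 * that many bytes of value data, and does not describe a
		 * property to update.
		 */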
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			if ((vd != 0x00000000) && (vd != 0x80000000)) {
				ret = update_property(dn, prop_name, vd,
						      prop_data);
				if (ret)
					pr_err("cxl: Could not update property %s - %i\n",
					       prop_name, ret);

				prop_data += vd;
			}
		}
	} while (rc == 1);

	of_node_put(dn);
	kfree(buf);
	return rc;
}
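
/*
 * Walk the node updates reported by ibm,update-nodes for this adapter
 * and refresh the properties of every node flagged with OPCODE_UPDATE.
 * Deleted and added nodes are only skipped over here.
 */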
static int update_devicetree(struct cxl *adapter, s32 scope)
{
	struct update_nodes_workarea *unwa;
	u32 action, node_count;
	int token, rc, i;
	__be32 *data, phandle;
	char *buf;

	token = rtas_token("ibm,update-nodes");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	unwa = (struct update_nodes_workarea *)&buf[0];
	unwa->unit_address = cpu_to_be64(adapter->guest->handle);
	do {
		rc = rcall(token, buf, scope);
		if (rc && rc != 1)
			break;
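
		/* The update_nodes_workarea (four 32-bit words) sits at
		 * the start of the buffer; the node action/count words
		 * and their phandles follow it.
		 */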
		data = (__be32 *)buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
			pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
				 action, node_count);
			data++;

			for (i = 0; i < node_count; i++) {
				phandle = *data++;

				switch (action) {
				case OPCODE_DELETE:
					/* nothing to do */
					break;
				case OPCODE_UPDATE:
					update_node(phandle, scope);
					break;
				case OPCODE_ADD:
					/* nothing to do, just move pointer */
					data++;
					break;
				}
			}
		}
	} while (rc == 1);

	kfree(buf);
	return 0;
}
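
/*
 * Split the user-supplied image into CXL_AI_BUFFER_SIZE chunks,
 * optionally prepending an ai_header to the first chunk, build the
 * scatter list describing them and pass it to the download or
 * validate hcall ('fct'). continue_token is preserved across calls
 * until an operation completes (rc == 0).
 */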
static int handle_image(struct cxl *adapter, int operation,
			long (*fct)(u64, u64, u64, u64 *),
			struct cxl_adapter_image *ai)
{
	size_t mod, s_copy, len_chunk = 0;
	struct ai_header *header = NULL;
	unsigned int entries = 0, i;
	void *dest, *from;
	int rc = 0, need_header;

	/* base adapter image header */
	need_header = (ai->flags & CXL_AI_NEED_HEADER);
	if (need_header) {
		header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
		if (!header)
			return -ENOMEM;
		header->version = cpu_to_be16(1);
		header->vendor = cpu_to_be16(adapter->guest->vendor);
		header->device = cpu_to_be16(adapter->guest->device);
		header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
		header->subsystem = cpu_to_be16(adapter->guest->subsystem);
		header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
		header->image_length = cpu_to_be64(ai->len_image);
	}

	/* number of entries in the list */
	len_chunk = ai->len_data;
	if (need_header)
		len_chunk += CXL_AI_HEADER_SIZE;
	entries = len_chunk / CXL_AI_BUFFER_SIZE;
	mod = len_chunk % CXL_AI_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > CXL_AI_MAX_ENTRIES) {
		rc = -EINVAL;
		goto err;
	}

	/*          < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
	 * chunk 0  ----------------------------------------------------
	 *          | header  |  data                                  |
	 *          ----------------------------------------------------
	 * chunk 1  ----------------------------------------------------
	 *          | data                                             |
	 *          ----------------------------------------------------
	 * ....
	 * chunk n  ----------------------------------------------------
	 *          | data                                             |
	 *          ----------------------------------------------------
	 */
	from = (void *) ai->data;
	for (i = 0; i < entries; i++) {
		dest = buffer[i];
		s_copy = CXL_AI_BUFFER_SIZE;

		if ((need_header) && (i == 0)) {
			/* add adapter image header */
			memcpy(buffer[i], header, sizeof(struct ai_header));
			s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
			dest += CXL_AI_HEADER_SIZE; /* image offset */
		}

		if ((i == (entries - 1)) && mod)
			s_copy = mod;

		/* copy data */
		if (copy_from_user(dest, from, s_copy)) {
			rc = -EFAULT;
			goto err;
		}

		/* fill in the list */
		le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
		le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
		from += s_copy;
	}

	pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
		 __func__, operation, need_header, entries, continue_token);

	/*
	 * download/validate the adapter image to the coherent
	 * platform facility
	 */
	rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
		 &continue_token);

	if (rc == 0) /* success of download/validation operation */
		continue_token = 0;

err:
	kfree(header);

	return rc;
}
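
/*
 * Dispatch a download or validate request to handle_image() with the
 * matching hcall. On error the adapter is reset; after a successful
 * validation the AFUs are removed, the adapter is reset and the device
 * tree is refreshed from firmware.
 */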
static int transfer_image(struct cxl *adapter, int operation,
			  struct cxl_adapter_image *ai)
{
	int rc = 0;
	int afu;

	switch (operation) {
	case DOWNLOAD_IMAGE:
		rc = handle_image(adapter, operation,
				  &cxl_h_download_adapter_image, ai);
		if (rc < 0) {
			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);
		}
		return rc;

	case VALIDATE_IMAGE:
		rc = handle_image(adapter, operation,
				  &cxl_h_validate_adapter_image, ai);
		if (rc < 0) {
			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);
			return rc;
		}
		if (rc == 0) {
			pr_devel("remove current afu\n");
			for (afu = 0; afu < adapter->slices; afu++)
				cxl_guest_remove_afu(adapter->afu[afu]);

			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);

			/* The entire image has now been
			 * downloaded and the validation has
			 * been successfully performed.
			 * After that, the partition should call
			 * ibm,update-nodes and
			 * ibm,update-properties to receive the
			 * current configuration
			 */
			rc = update_devicetree(adapter, DEVICE_SCOPE);
			transfer = 1;
		}
		return rc;
	}

	return -EINVAL;
}
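
/*
 * Copy the cxl_adapter_image descriptor from userspace, reject any use
 * of reserved fields or unknown flags, then run the requested transfer.
 */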
static long ioctl_transfer_image(struct cxl *adapter, int operation,
				 struct cxl_adapter_image __user *uai)
{
	struct cxl_adapter_image ai;

	pr_devel("%s\n", __func__);

	if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
		return -EFAULT;

	/*
	 * Make sure reserved fields and bits are set to 0
	 */
	if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
	    (ai.flags & ~CXL_AI_ALL))
		return -EINVAL;

	return transfer_image(adapter, operation, &ai);
}
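
/*
 * Only one process may have the flash device open at a time (enforced
 * by 'sem'). Opening allocates the scatter list page and one page per
 * possible list entry for staging the image chunks.
 */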
static int device_open(struct inode *inode, struct file *file)
{
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	struct cxl *adapter;
	int rc = 0, i;

	pr_devel("in %s\n", __func__);

	BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);

	/* Allows one process to open the device by using a semaphore */
	if (down_interruptible(&sem) != 0)
		return -EPERM;

	if (!(adapter = get_cxl_adapter(adapter_num))) {
		rc = -ENODEV;
		goto err_unlock;
	}

	file->private_data = adapter;
	continue_token = 0;
	transfer = 0;

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
		buffer[i] = NULL;

	/* aligned buffer containing list entries which describes up to
	 * 1 megabyte of data (256 entries of 4096 bytes each)
	 * Logical real address of buffer 0  -  Buffer 0 length in bytes
	 * Logical real address of buffer 1  -  Buffer 1 length in bytes
	 * Logical real address of buffer 2  -  Buffer 2 length in bytes
	 * ....
	 * ....
	 * Logical real address of buffer N  -  Buffer N length in bytes
	 */
	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!buffer[i]) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);

err:
	put_device(&adapter->dev);

err_unlock:
	up(&sem);

	return rc;
}
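
/*
 * Two ioctls are supported: CXL_IOCTL_DOWNLOAD_IMAGE and
 * CXL_IOCTL_VALIDATE_IMAGE, both taking a struct cxl_adapter_image.
 */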
static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl *adapter = file->private_data;

	pr_devel("in %s\n", __func__);

	if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
		return ioctl_transfer_image(adapter,
					DOWNLOAD_IMAGE,
					(struct cxl_adapter_image __user *)arg);
	else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
		return ioctl_transfer_image(adapter,
					VALIDATE_IMAGE,
					(struct cxl_adapter_image __user *)arg);
	else
		return -EINVAL;
}
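
/*
 * Release the staging pages and the open-exclusion semaphore. If an
 * image transfer completed, ask for the module to be reloaded so the
 * new image is picked up; otherwise reset the adapter.
 */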
static int device_close(struct inode *inode, struct file *file)
{
	struct cxl *adapter = file->private_data;
	int i;

	pr_devel("in %s\n", __func__);

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);

	up(&sem);
	put_device(&adapter->dev);
	continue_token = 0;

	/* reload the module */
	if (transfer)
		cxl_guest_reload_module(adapter);
	else {
		pr_devel("resetting adapter\n");
		cxl_h_reset_adapter(adapter->guest->handle);
	}

	transfer = 0;
	return 0;
}

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= device_open,
	.unlocked_ioctl	= device_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.release	= device_close,
};

void cxl_guest_remove_chardev(struct cxl *adapter)
{
	cdev_del(&adapter->guest->cdev);
}
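
/*
 * Register the per-adapter flash character device and initialise the
 * semaphore that serialises access to it.
 */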
int cxl_guest_add_chardev(struct cxl *adapter)
{
	dev_t devt;
	int rc;

	devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
	cdev_init(&adapter->guest->cdev, &fops);
	if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
		dev_err(&adapter->dev,
			"Unable to add chardev on adapter (card%i): %i\n",
			adapter->adapter_num, rc);
		goto err;
	}
	adapter->dev.devt = devt;
	sema_init(&sem, 1);
err:
	return rc;
}