  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2018 Intel Corporation. All rights reserved. */
  3. #include <linux/libnvdimm.h>
  4. #include <linux/ndctl.h>
  5. #include <linux/acpi.h>
  6. #include <asm/smp.h>
  7. #include "intel.h"
  8. #include "nfit.h"
  9. static ssize_t firmware_activate_noidle_show(struct device *dev,
  10. struct device_attribute *attr, char *buf)
  11. {
  12. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  13. struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
  14. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  15. return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
  16. }
  17. static ssize_t firmware_activate_noidle_store(struct device *dev,
  18. struct device_attribute *attr, const char *buf, size_t size)
  19. {
  20. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  21. struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
  22. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  23. ssize_t rc;
  24. bool val;
  25. rc = kstrtobool(buf, &val);
  26. if (rc)
  27. return rc;
  28. if (val != acpi_desc->fwa_noidle)
  29. acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
  30. acpi_desc->fwa_noidle = val;
  31. return size;
  32. }
/* Expose firmware_activate_noidle as a read-write device attribute. */
DEVICE_ATTR_RW(firmware_activate_noidle);
  34. bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
  35. {
  36. struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
  37. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  38. unsigned long *mask;
  39. if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
  40. return false;
  41. mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
  42. return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
  43. }
/*
 * Translate the Intel "get security state" DSM payload into the generic
 * NVDIMM_SECURITY_* flag set, for either the user or master passphrase
 * view. Returns 0 (no flags) when the command is unsupported or fails.
 */
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		/* The master passphrase state lives in extended_state. */
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		/* Frozen by an explicit freeze-lock or by a power limit. */
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}
  102. static int intel_security_freeze(struct nvdimm *nvdimm)
  103. {
  104. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  105. struct {
  106. struct nd_cmd_pkg pkg;
  107. struct nd_intel_freeze_lock cmd;
  108. } nd_cmd = {
  109. .pkg = {
  110. .nd_command = NVDIMM_INTEL_FREEZE_LOCK,
  111. .nd_family = NVDIMM_FAMILY_INTEL,
  112. .nd_size_out = ND_INTEL_STATUS_SIZE,
  113. .nd_fw_size = ND_INTEL_STATUS_SIZE,
  114. },
  115. };
  116. int rc;
  117. if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
  118. return -ENOTTY;
  119. rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
  120. if (rc < 0)
  121. return rc;
  122. if (nd_cmd.cmd.status)
  123. return -EIO;
  124. return 0;
  125. }
  126. static int intel_security_change_key(struct nvdimm *nvdimm,
  127. const struct nvdimm_key_data *old_data,
  128. const struct nvdimm_key_data *new_data,
  129. enum nvdimm_passphrase_type ptype)
  130. {
  131. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  132. unsigned int cmd = ptype == NVDIMM_MASTER ?
  133. NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
  134. NVDIMM_INTEL_SET_PASSPHRASE;
  135. struct {
  136. struct nd_cmd_pkg pkg;
  137. struct nd_intel_set_passphrase cmd;
  138. } nd_cmd = {
  139. .pkg = {
  140. .nd_family = NVDIMM_FAMILY_INTEL,
  141. .nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
  142. .nd_size_out = ND_INTEL_STATUS_SIZE,
  143. .nd_fw_size = ND_INTEL_STATUS_SIZE,
  144. .nd_command = cmd,
  145. },
  146. };
  147. int rc;
  148. if (!test_bit(cmd, &nfit_mem->dsm_mask))
  149. return -ENOTTY;
  150. memcpy(nd_cmd.cmd.old_pass, old_data->data,
  151. sizeof(nd_cmd.cmd.old_pass));
  152. memcpy(nd_cmd.cmd.new_pass, new_data->data,
  153. sizeof(nd_cmd.cmd.new_pass));
  154. rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
  155. if (rc < 0)
  156. return rc;
  157. switch (nd_cmd.cmd.status) {
  158. case 0:
  159. return 0;
  160. case ND_INTEL_STATUS_INVALID_PASS:
  161. return -EINVAL;
  162. case ND_INTEL_STATUS_NOT_SUPPORTED:
  163. return -EOPNOTSUPP;
  164. case ND_INTEL_STATUS_INVALID_STATE:
  165. default:
  166. return -EIO;
  167. }
  168. }
/* Defined at the bottom of this file behind an arch #ifdef. */
static void nvdimm_invalidate_cache(void);
/*
 * Unlock the DIMM with the user passphrase. On success all CPU caches
 * are invalidated before returning so no stale pre-unlock data is
 * served from cache once the media becomes readable.
 */
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}
  206. static int intel_security_disable(struct nvdimm *nvdimm,
  207. const struct nvdimm_key_data *key_data)
  208. {
  209. int rc;
  210. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  211. struct {
  212. struct nd_cmd_pkg pkg;
  213. struct nd_intel_disable_passphrase cmd;
  214. } nd_cmd = {
  215. .pkg = {
  216. .nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
  217. .nd_family = NVDIMM_FAMILY_INTEL,
  218. .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
  219. .nd_size_out = ND_INTEL_STATUS_SIZE,
  220. .nd_fw_size = ND_INTEL_STATUS_SIZE,
  221. },
  222. };
  223. if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
  224. return -ENOTTY;
  225. memcpy(nd_cmd.cmd.passphrase, key_data->data,
  226. sizeof(nd_cmd.cmd.passphrase));
  227. rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
  228. if (rc < 0)
  229. return rc;
  230. switch (nd_cmd.cmd.status) {
  231. case 0:
  232. break;
  233. case ND_INTEL_STATUS_INVALID_PASS:
  234. return -EINVAL;
  235. case ND_INTEL_STATUS_INVALID_STATE:
  236. default:
  237. return -ENXIO;
  238. }
  239. return 0;
  240. }
/*
 * Cryptographically erase the DIMM with either the user or master
 * passphrase. Caches are flushed both before the erase (so no dirty
 * line is written back mid-operation) and after (so nothing stale is
 * read back from the now-erased media).
 */
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}
  285. static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
  286. {
  287. int rc;
  288. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  289. struct {
  290. struct nd_cmd_pkg pkg;
  291. struct nd_intel_query_overwrite cmd;
  292. } nd_cmd = {
  293. .pkg = {
  294. .nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
  295. .nd_family = NVDIMM_FAMILY_INTEL,
  296. .nd_size_out = ND_INTEL_STATUS_SIZE,
  297. .nd_fw_size = ND_INTEL_STATUS_SIZE,
  298. },
  299. };
  300. if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
  301. return -ENOTTY;
  302. rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
  303. if (rc < 0)
  304. return rc;
  305. switch (nd_cmd.cmd.status) {
  306. case 0:
  307. break;
  308. case ND_INTEL_STATUS_OQUERY_INPROGRESS:
  309. return -EBUSY;
  310. default:
  311. return -ENXIO;
  312. }
  313. /* flush all cache before we make the nvdimms available */
  314. nvdimm_invalidate_cache();
  315. return 0;
  316. }
  317. static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
  318. const struct nvdimm_key_data *nkey)
  319. {
  320. int rc;
  321. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  322. struct {
  323. struct nd_cmd_pkg pkg;
  324. struct nd_intel_overwrite cmd;
  325. } nd_cmd = {
  326. .pkg = {
  327. .nd_command = NVDIMM_INTEL_OVERWRITE,
  328. .nd_family = NVDIMM_FAMILY_INTEL,
  329. .nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
  330. .nd_size_out = ND_INTEL_STATUS_SIZE,
  331. .nd_fw_size = ND_INTEL_STATUS_SIZE,
  332. },
  333. };
  334. if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
  335. return -ENOTTY;
  336. /* flush all cache before we erase DIMM */
  337. nvdimm_invalidate_cache();
  338. memcpy(nd_cmd.cmd.passphrase, nkey->data,
  339. sizeof(nd_cmd.cmd.passphrase));
  340. rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
  341. if (rc < 0)
  342. return rc;
  343. switch (nd_cmd.cmd.status) {
  344. case 0:
  345. return 0;
  346. case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
  347. return -ENOTSUPP;
  348. case ND_INTEL_STATUS_INVALID_PASS:
  349. return -EINVAL;
  350. case ND_INTEL_STATUS_INVALID_STATE:
  351. default:
  352. return -ENXIO;
  353. }
  354. }
  355. /*
  356. * TODO: define a cross arch wbinvd equivalent when/if
  357. * NVDIMM_FAMILY_INTEL command support arrives on another arch.
  358. */
  359. #ifdef CONFIG_X86
  360. static void nvdimm_invalidate_cache(void)
  361. {
  362. wbinvd_on_all_cpus();
  363. }
  364. #else
  365. static void nvdimm_invalidate_cache(void)
  366. {
  367. WARN_ON_ONCE("cache invalidation required after unlock\n");
  368. }
  369. #endif
/*
 * Security operations table. The ops that require a real cache
 * invalidation (unlock, erase, overwrite, query_overwrite) are only
 * wired up on x86, where nvdimm_invalidate_cache() is backed by
 * wbinvd_on_all_cpus().
 */
static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
  383. static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
  384. struct nd_intel_bus_fw_activate_businfo *info)
  385. {
  386. struct {
  387. struct nd_cmd_pkg pkg;
  388. struct nd_intel_bus_fw_activate_businfo cmd;
  389. } nd_cmd = {
  390. .pkg = {
  391. .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
  392. .nd_family = NVDIMM_BUS_FAMILY_INTEL,
  393. .nd_size_out =
  394. sizeof(struct nd_intel_bus_fw_activate_businfo),
  395. .nd_fw_size =
  396. sizeof(struct nd_intel_bus_fw_activate_businfo),
  397. },
  398. };
  399. int rc;
  400. rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
  401. NULL);
  402. *info = nd_cmd.cmd;
  403. return rc;
  404. }
/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		/*
		 * Report arm-overflow when the activation would take
		 * longer than the platform's maximum quiesce window.
		 */
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}
  471. static enum nvdimm_fwa_capability intel_bus_fwa_capability(
  472. struct nvdimm_bus_descriptor *nd_desc)
  473. {
  474. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  475. if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
  476. return acpi_desc->fwa_cap;
  477. if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
  478. return acpi_desc->fwa_cap;
  479. return NVDIMM_FWA_CAP_INVALID;
  480. }
/*
 * Trigger firmware activation across the bus. Only valid from the
 * armed (or arm-overflow) state; afterwards all cached bus and DIMM
 * activation state is invalidated via fwa_count.
 */
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate),
		},
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default. Let a module
		 * parameter override that policy.
		 */
		.cmd = {
			.iodev_state = acpi_desc->fwa_noidle
				? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
				: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
		},
	};
	int rc;

	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd,
			sizeof(nd_cmd), NULL);

	/*
	 * Whether the command succeeded, or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context)
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}
/* Bus-level firmware activation operations published to libnvdimm. */
static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
  537. static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
  538. struct nd_intel_fw_activate_dimminfo *info)
  539. {
  540. struct {
  541. struct nd_cmd_pkg pkg;
  542. struct nd_intel_fw_activate_dimminfo cmd;
  543. } nd_cmd = {
  544. .pkg = {
  545. .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
  546. .nd_family = NVDIMM_FAMILY_INTEL,
  547. .nd_size_out =
  548. sizeof(struct nd_intel_fw_activate_dimminfo),
  549. .nd_fw_size =
  550. sizeof(struct nd_intel_fw_activate_dimminfo),
  551. },
  552. };
  553. int rc;
  554. rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
  555. *info = nd_cmd.cmd;
  556. return rc;
  557. }
/*
 * Return the per-DIMM firmware activation state, refreshing the cached
 * state (and result) from firmware when it is invalid, busy, or stale
 * relative to the bus-level activation count.
 */
static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nd_intel_fw_activate_dimminfo info;
	int rc;

	/*
	 * Similar to the bus state, since activate is synchronous the
	 * busy state should resolve within the context of 'activate'.
	 */
	switch (nfit_mem->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* If no activations occurred the old state is still valid */
		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
			return nfit_mem->fwa_state;
	}

	rc = intel_fwa_dimminfo(nvdimm, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	/* Translate the firmware state code into the generic enum. */
	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
		break;
	default:
		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
		break;
	}

	/* The last-activation result arrives in the same payload. */
	switch (info.result) {
	case ND_INTEL_DIMM_FWA_NONE:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
		break;
	case ND_INTEL_DIMM_FWA_SUCCESS:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
		break;
	case ND_INTEL_DIMM_FWA_NOTSTAGED:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
		break;
	case ND_INTEL_DIMM_FWA_NEEDRESET:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
		break;
	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
	case ND_INTEL_DIMM_FWA_ABORT:
	case ND_INTEL_DIMM_FWA_NOTSUPP:
	case ND_INTEL_DIMM_FWA_ERROR:
	default:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
		break;
	}

	/* Mark this DIMM's cached state current w.r.t. bus activations. */
	nfit_mem->fwa_count = acpi_desc->fwa_count;

	return nfit_mem->fwa_state;
}
  618. static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
  619. {
  620. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  621. struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
  622. if (nfit_mem->fwa_count == acpi_desc->fwa_count
  623. && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
  624. return nfit_mem->fwa_result;
  625. if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
  626. return nfit_mem->fwa_result;
  627. return NVDIMM_FWA_RESULT_INVALID;
  628. }
/*
 * Arm or disarm the DIMM for firmware activation. A no-op request
 * (disarm while idle, arm while armed) returns success without issuing
 * the DSM; otherwise both the DIMM and bus cached states are
 * invalidated before the command is sent.
 */
static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_arm cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_arm),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_arm),
		},
		.cmd = {
			.activate_arm = arm == NVDIMM_FWA_ARM
				? ND_INTEL_DIMM_FWA_ARM
				: ND_INTEL_DIMM_FWA_DISARM,
		},
	};
	int rc;

	switch (intel_fwa_state(nvdimm)) {
	case NVDIMM_FWA_INVALID:
		return -ENXIO;
	case NVDIMM_FWA_BUSY:
		return -EBUSY;
	case NVDIMM_FWA_IDLE:
		if (arm == NVDIMM_FWA_DISARM)
			return 0;
		break;
	case NVDIMM_FWA_ARMED:
		if (arm == NVDIMM_FWA_ARM)
			return 0;
		break;
	default:
		return -ENXIO;
	}

	/*
	 * Invalidate the bus-level state, now that we're committed to
	 * changing the 'arm' state.
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
			? "arm" : "disarm", rc);

	return rc;
}
/* Per-DIMM firmware activation operations published to libnvdimm. */
static const struct nvdimm_fw_ops __intel_fw_ops = {
	.activate_state = intel_fwa_state,
	.activate_result = intel_fwa_result,
	.arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;