// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */
#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_shash *shash_tfm;
	struct crypto_kpp *dh_tfm;
	void *buf;
	size_t buf_size;
	int qid;
	int error;
	u32 s1;
	u32 s2;
	u16 transaction;
	u8 status;
	u8 hash_id;
	size_t hash_len;
	u8 dhgroup_id;
	u8 c1[64];
	u8 c2[64];
	u8 response[64];
	u8 *host_response;
	u8 *ctrl_key;
	int ctrl_key_len;
	u8 *host_key;
	int host_key_len;
	u8 *sess_key;
	int sess_key_len;
};

#define nvme_auth_flags_from_qid(qid) \
	(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
	(qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q

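/*
 * Transfer one DH-HMAC-CHAP message as a fabrics Auth Send or Auth
 * Receive command. Admin-queue (qid 0) messages go via the fabrics queue
 * and may block; I/O queue messages use the connect queue with reserved,
 * non-waiting requests (see the macros above). A positive return value
 * is an NVMe status, a negative one a POSIX error code.
 */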
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
		void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
	struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
	int ret;

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid,
				     0, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
			 "qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_err(ctrl->device,
			"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}

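/*
 * Sanity-check a received message header: a FAILURE1 message carries the
 * controller's failure reason; anything other than the expected message
 * type, or a mismatched transaction ID, is rejected as an incorrect
 * message.
 */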
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		return data->rescode_exp;
	}
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	if (le16_to_cpu(data->t_id) != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, le16_to_cpu(data->t_id));
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	return 0;
}

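/*
 * Build the AUTH_Negotiate payload: advertise a single authentication
 * protocol (DH-HMAC-CHAP) with the three supported hashes (SHA-256/384/512)
 * and six DH group identifiers, from NULL up to 8192 bit. The DH group
 * identifiers live at a fixed offset (30) within idlist[], as given by
 * the payload layout.
 */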
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (chap->buf_size < size) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	data->sc_c = 0; /* No secure channel concatenation */
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	return size;
}

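/*
 * Parse the DH-CHAP_Challenge selected by the controller. The hash and
 * KPP transforms are kept across renegotiations and only reallocated when
 * the controller picks a different hash or DH group. For a non-NULL DH
 * group the controller's public value follows the challenge in cval[]
 * and is stashed in ctrl_key for the later shared-secret computation.
 */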
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (chap->buf_size < size) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return NVME_SC_INVALID_FIELD;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return NVME_SC_INVALID_FIELD;
	}

	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return NVME_SC_AUTH_REQUIRED;
	}

	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return NVME_SC_AUTH_REQUIRED;
	}

	/* Reset host response if the hash had been changed */
	if (chap->hash_id != data->hashid) {
		kfree(chap->host_response);
		chap->host_response = NULL;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return NVME_SC_AUTH_REQUIRED;
	}

	/* Clear host and controller key to avoid accidental reuse */
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;

	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return NVME_SC_INVALID_FIELD;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return NVME_SC_AUTH_REQUIRED;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return NVME_SC_INVALID_FIELD;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return NVME_SC_AUTH_REQUIRED;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			(int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}

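/*
 * Build the DH-CHAP_Reply: the host response, an optional controller
 * challenge c2 (only when bidirectional authentication was requested via
 * a controller key), and the host's DH public value when a DH group is
 * in use. rval[] holds response and challenge back to back, followed by
 * the public value.
 */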
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (chap->buf_size < size) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key) {
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		chap->s2 = nvme_auth_get_seqnum();
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
		chap->s2 = 0;
	}
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}

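/*
 * Process DH-CHAP_Success1. If bidirectional authentication was requested
 * the message must carry a valid controller response, which is compared
 * against the value precomputed by nvme_auth_dhchap_setup_ctrl_response();
 * any mismatch fails controller authentication.
 */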
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data);

	if (ctrl->ctrl_key)
		size += chap->hash_len;

	if (chap->buf_size < size) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return NVME_SC_INVALID_FIELD;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return NVME_SC_INVALID_FIELD;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return NVME_SC_AUTH_REQUIRED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}

static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
	data->t_id = cpu_to_le16(chap->transaction);

	return size;
}

static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
	data->t_id = cpu_to_le16(chap->transaction);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = chap->status;

	return size;
}

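/*
 * Compute the host response. The hash input, in order, is:
 *
 *   C || SEQ || TID || 0x00 || "HostHost" || NQNh || 0x00 || NQNc
 *
 * keyed with the transformed host key, where C is the (possibly
 * DH-augmented) challenge, SEQ the 32-bit sequence number and TID the
 * 16-bit transaction identifier (both little endian), and NQNh/NQNc the
 * host and subsystem NQNs. With a DH group in use the challenge is first
 * augmented with the session key via nvme_auth_augmented_challenge().
 */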
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->host_response) {
		chap->host_response = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->host_response)) {
			ret = PTR_ERR(chap->host_response);
			chap->host_response = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->host_response, ctrl->host_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}

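/*
 * Compute the expected controller response for bidirectional
 * authentication. Same construction as the host response, but keyed with
 * the transformed controller key, using challenge c2 and sequence number
 * s2, the label "Controller", and the NQNs in the opposite order
 * (subsystem first, then host).
 */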
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 *ctrl_response;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(ctrl_response)) {
		ret = PTR_ERR(ctrl_response);
		return ret;
	}
	ret = crypto_shash_setkey(chap->shash_tfm,
			ctrl_response, ctrl->ctrl_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, 4);
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c2)
		kfree(challenge);
	kfree(ctrl_response);
	return ret;
}

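/*
 * Run the DH exchange for the selected group: generate an ephemeral
 * private key, derive the host public value that is sent in the reply,
 * and combine the controller's public value into the shared session key
 * used for challenge augmentation. A host key kept from an earlier round
 * is reused as-is.
 */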
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		kfree(chap->host_key);
		chap->host_key = NULL;
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		kfree_sensitive(chap->sess_key);
		chap->sess_key = NULL;
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}

static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	kfree_sensitive(chap->host_response);
	chap->host_response = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
}

static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	/* nvme_auth_reset_dhchap() already frees and clears all key material */
	nvme_auth_reset_dhchap(chap);
	if (chap->shash_tfm)
		crypto_free_shash(chap->shash_tfm);
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
	kfree(chap->buf);
	kfree(chap);
}

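/*
 * Per-queue authentication work item: drives the full DH-HMAC-CHAP
 * handshake (negotiate, challenge, reply, success1 and, for bidirectional
 * authentication, success2), sending a failure2 message if any step after
 * the challenge goes wrong. The result is left in chap->error for
 * nvme_auth_wait() to pick up.
 */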
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, chap->buf_size);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = NVME_SC_AUTH_REQUIRED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, chap->buf_size);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = NVME_SC_AUTH_REQUIRED;
		return;
	}

	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = NVME_SC_AUTH_REQUIRED;
		goto fail2;
	}

	if (ctrl->ctrl_key) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		return;
	}

fail2:
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}

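/*
 * Start DH-HMAC-CHAP authentication for a queue: requeue an existing
 * per-queue context if one is already present, otherwise allocate a new
 * one with a 4k payload buffer (large enough even for ffdhe8192) and
 * schedule its work item on nvme_wq.
 */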
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;

	if (!ctrl->host_key) {
		dev_warn(ctrl->device, "qid %d: no key\n", qid);
		return -ENOKEY;
	}

	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
		return -ENOKEY;
	}

	mutex_lock(&ctrl->dhchap_auth_mutex);
	/* Check if the context is already queued */
	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
		WARN_ON(!chap->buf);
		if (chap->qid == qid) {
			dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			flush_work(&chap->auth_work);
			nvme_auth_reset_dhchap(chap);
			queue_work(nvme_wq, &chap->auth_work);
			return 0;
		}
	}
	chap = kzalloc(sizeof(*chap), GFP_KERNEL);
	if (!chap) {
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		return -ENOMEM;
	}
	chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
	chap->ctrl = ctrl;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is large enough even for ffdhe8192.
	 */
	chap->buf_size = 4096;
	chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
	if (!chap->buf) {
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		kfree(chap);
		return -ENOMEM;
	}

	INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	list_add(&chap->entry, &ctrl->dhchap_auth_list);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	queue_work(nvme_wq, &chap->auth_work);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

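/*
 * Wait for the authentication work on a queue to finish and return its
 * result; -ENXIO if no context exists for that queue.
 */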
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;
	int ret;

	mutex_lock(&ctrl->dhchap_auth_mutex);
	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
		if (chap->qid != qid)
			continue;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		flush_work(&chap->auth_work);
		ret = chap->error;
		return ret;
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);

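/*
 * Controller-wide (re)authentication: authenticate the admin queue first
 * and bail out on failure, then kick off authentication for all I/O
 * queues without waiting for their results.
 */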
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}

	for (q = 1; q < ctrl->queue_count; q++) {
		ret = nvme_auth_negotiate(ctrl, q);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: error %d setting up authentication\n",
				 q, ret);
			break;
		}
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
}

int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	mutex_init(&ctrl->dhchap_auth_mutex);
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
			&ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap = NULL, *tmp;

	cancel_work_sync(&ctrl->dhchap_auth_work);
	mutex_lock(&ctrl->dhchap_auth_mutex);
	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
		cancel_work_sync(&chap->auth_work);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

void nvme_auth_free(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap = NULL, *tmp;

	mutex_lock(&ctrl->dhchap_auth_mutex);
	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
		list_del_init(&chap->entry);
		flush_work(&chap->auth_work);
		nvme_auth_free_dhchap(chap);
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ctrl->host_key) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	if (ctrl->ctrl_key) {
		nvme_auth_free_key(ctrl->ctrl_key);
		ctrl->ctrl_key = NULL;
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);