fils_aead.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * FILS AEAD for (Re)Association Request/Response frames
 * Copyright 2016, Qualcomm Atheros, Inc.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

#include "ieee80211_i.h"
#include "aes_cmac.h"
#include "fils_aead.h"

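/* Doubling (multiply by x) in GF(2^128): the "dbl" operation used by the
 * S2V construction (RFC 5297); 0x87 is the reduction constant of the field.
 */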
static void gf_mulx(u8 *pad)
{
        u64 a = get_unaligned_be64(pad);
        u64 b = get_unaligned_be64(pad + 8);

        put_unaligned_be64((a << 1) | (b >> 63), pad);
        put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8);
}

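/* S2V pseudo-random function from RFC 5297, built on AES-CMAC: the CMAC of
 * each input vector is chained into D with dbl/xor, and the final CMAC is
 * returned as the 128-bit value V.
 */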
static int aes_s2v(struct crypto_shash *tfm,
                   size_t num_elem, const u8 *addr[], size_t len[], u8 *v)
{
        u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {};
        SHASH_DESC_ON_STACK(desc, tfm);
        size_t i;

        desc->tfm = tfm;

        /* D = AES-CMAC(K, <zero>) */
        crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d);

        for (i = 0; i < num_elem - 1; i++) {
                /* D = dbl(D) xor AES_CMAC(K, Si) */
                gf_mulx(d); /* dbl */
                crypto_shash_digest(desc, addr[i], len[i], tmp);
                crypto_xor(d, tmp, AES_BLOCK_SIZE);
        }

        crypto_shash_init(desc);

        if (len[i] >= AES_BLOCK_SIZE) {
                /* len(Sn) >= 128 */
                /* T = Sn xorend D */
                crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE);
                crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE,
                           AES_BLOCK_SIZE);
        } else {
                /* len(Sn) < 128 */
                /* T = dbl(D) xor pad(Sn) */
                gf_mulx(d); /* dbl */
                crypto_xor(d, addr[i], len[i]);
                d[len[i]] ^= 0x80;
        }

        /* V = AES-CMAC(K, T) */
        crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v);

        return 0;
}

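/* AES-SIV (RFC 5297) encryption: S2V over the AAD vectors plus the plaintext
 * produces the synthetic IV V, which is written in front of the ciphertext
 * and, with two bits masked out, used as the initial AES-CTR counter.
 */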
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_encrypt(const u8 *key, size_t key_len,
                           const u8 *plain, size_t plain_len,
                           size_t num_elem, const u8 *addr[],
                           size_t len[], u8 *out)
{
        u8 v[AES_BLOCK_SIZE];
        struct crypto_shash *tfm;
        struct crypto_skcipher *tfm2;
        struct skcipher_request *req;
        int res;
        struct scatterlist src[1], dst[1];
        u8 *tmp;

        key_len /= 2; /* S2V key || CTR key */

        addr[num_elem] = plain;
        len[num_elem] = plain_len;
        num_elem++;

        /* S2V */

        tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        /* K1 for S2V */
        res = crypto_shash_setkey(tfm, key, key_len);
        if (!res)
                res = aes_s2v(tfm, num_elem, addr, len, v);
        crypto_free_shash(tfm);
        if (res)
                return res;

        /* Use a temporary copy of the plaintext: the AES-CTR output (written
         * after the SIV block) may overlap and overwrite the original
         * plaintext buffer.
         */
        tmp = kmemdup(plain, plain_len, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;

        /* IV for CTR before encrypted data */
        memcpy(out, v, AES_BLOCK_SIZE);

        /* Synthetic IV to be used as the initial counter in CTR:
         * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
         */
        v[8] &= 0x7f;
        v[12] &= 0x7f;

        /* CTR */

        tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2)) {
                kfree(tmp);
                return PTR_ERR(tfm2);
        }
        /* K2 for CTR */
        res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
        if (res)
                goto fail;

        req = skcipher_request_alloc(tfm2, GFP_KERNEL);
        if (!req) {
                res = -ENOMEM;
                goto fail;
        }

        sg_init_one(src, tmp, plain_len);
        sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len);
        skcipher_request_set_crypt(req, src, dst, plain_len, v);
        res = crypto_skcipher_encrypt(req);
        skcipher_request_free(req);
fail:
        kfree(tmp);
        crypto_free_skcipher(tfm2);
        return res;
}

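/* AES-SIV (RFC 5297) decryption: AES-CTR with the (masked) received SIV as
 * the initial counter recovers the plaintext; S2V is then recomputed over the
 * AAD vectors plus the plaintext and compared against the received SIV to
 * authenticate the data.
 */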
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_decrypt(const u8 *key, size_t key_len,
                           const u8 *iv_crypt, size_t iv_c_len,
                           size_t num_elem, const u8 *addr[], size_t len[],
                           u8 *out)
{
        struct crypto_shash *tfm;
        struct crypto_skcipher *tfm2;
        struct skcipher_request *req;
        struct scatterlist src[1], dst[1];
        size_t crypt_len;
        int res;
        u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE];
        u8 check[AES_BLOCK_SIZE];

        crypt_len = iv_c_len - AES_BLOCK_SIZE;
        key_len /= 2; /* S2V key || CTR key */
        addr[num_elem] = out;
        len[num_elem] = crypt_len;
        num_elem++;

        memcpy(iv, iv_crypt, AES_BLOCK_SIZE);
        memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE);

        /* Synthetic IV to be used as the initial counter in CTR:
         * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
         */
        iv[8] &= 0x7f;
        iv[12] &= 0x7f;

        /* CTR */

        tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2))
                return PTR_ERR(tfm2);
        /* K2 for CTR */
        res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
        if (res) {
                crypto_free_skcipher(tfm2);
                return res;
        }

        req = skcipher_request_alloc(tfm2, GFP_KERNEL);
        if (!req) {
                crypto_free_skcipher(tfm2);
                return -ENOMEM;
        }

        sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len);
        sg_init_one(dst, out, crypt_len);
        skcipher_request_set_crypt(req, src, dst, crypt_len, iv);
        res = crypto_skcipher_decrypt(req);
        skcipher_request_free(req);
        crypto_free_skcipher(tfm2);
        if (res)
                return res;

        /* S2V */

        tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        /* K1 for S2V */
        res = crypto_shash_setkey(tfm, key, key_len);
        if (!res)
                res = aes_s2v(tfm, num_elem, addr, len, check);
        crypto_free_shash(tfm);
        if (res)
                return res;
        if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0)
                return -EINVAL;

        return 0;
}

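/* Encrypt the part of a FILS (Re)Association Request frame that follows the
 * FILS Session element with AES-SIV, using the FILS KEK and the AAD vectors
 * listed below, as defined for FILS in IEEE Std 802.11ai.
 */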
int fils_encrypt_assoc_req(struct sk_buff *skb,
                           struct ieee80211_mgd_assoc_data *assoc_data)
{
        struct ieee80211_mgmt *mgmt = (void *)skb->data;
        u8 *capab, *ies, *encr;
        const u8 *addr[5 + 1];
        const struct element *session;
        size_t len[5 + 1];
        size_t crypt_len;

        if (ieee80211_is_reassoc_req(mgmt->frame_control)) {
                capab = (u8 *)&mgmt->u.reassoc_req.capab_info;
                ies = mgmt->u.reassoc_req.variable;
        } else {
                capab = (u8 *)&mgmt->u.assoc_req.capab_info;
                ies = mgmt->u.assoc_req.variable;
        }

        session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION,
                                         ies, skb->data + skb->len - ies);
        if (!session || session->datalen != 1 + 8)
                return -EINVAL;
        /* encrypt after FILS Session element */
        encr = (u8 *)session->data + 1 + 8;

        /* AES-SIV AAD vectors */

        /* The STA's MAC address */
        addr[0] = mgmt->sa;
        len[0] = ETH_ALEN;
        /* The AP's BSSID */
        addr[1] = mgmt->da;
        len[1] = ETH_ALEN;
        /* The STA's nonce */
        addr[2] = assoc_data->fils_nonces;
        len[2] = FILS_NONCE_LEN;
        /* The AP's nonce */
        addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
        len[3] = FILS_NONCE_LEN;
        /* The (Re)Association Request frame from the Capability Information
         * field to the FILS Session element (both inclusive).
         */
        addr[4] = capab;
        len[4] = encr - capab;

        crypt_len = skb->data + skb->len - encr;
        skb_put(skb, AES_BLOCK_SIZE);
        return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
                               encr, crypt_len, 5, addr, len, encr);
}

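/* Decrypt and validate the AES-SIV protected part of a FILS (Re)Association
 * Response frame; on success, the AES_BLOCK_SIZE bytes of AES-SIV overhead
 * are subtracted from *frame_len.
 */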
int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                            u8 *frame, size_t *frame_len,
                            struct ieee80211_mgd_assoc_data *assoc_data)
{
        struct ieee80211_mgmt *mgmt = (void *)frame;
        u8 *capab, *ies, *encr;
        const u8 *addr[5 + 1];
        const struct element *session;
        size_t len[5 + 1];
        int res;
        size_t crypt_len;

        if (*frame_len < 24 + 6)
                return -EINVAL;

        capab = (u8 *)&mgmt->u.assoc_resp.capab_info;
        ies = mgmt->u.assoc_resp.variable;
        session = cfg80211_find_ext_elem(WLAN_EID_EXT_FILS_SESSION,
                                         ies, frame + *frame_len - ies);
        if (!session || session->datalen != 1 + 8) {
                mlme_dbg(sdata,
                         "No (valid) FILS Session element in (Re)Association Response frame from %pM",
                         mgmt->sa);
                return -EINVAL;
        }
        /* decrypt after FILS Session element */
        encr = (u8 *)session->data + 1 + 8;

        /* AES-SIV AAD vectors */

        /* The AP's BSSID */
        addr[0] = mgmt->sa;
        len[0] = ETH_ALEN;
        /* The STA's MAC address */
        addr[1] = mgmt->da;
        len[1] = ETH_ALEN;
        /* The AP's nonce */
        addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
        len[2] = FILS_NONCE_LEN;
        /* The STA's nonce */
        addr[3] = assoc_data->fils_nonces;
        len[3] = FILS_NONCE_LEN;
        /* The (Re)Association Response frame from the Capability Information
         * field to the FILS Session element (both inclusive).
         */
        addr[4] = capab;
        len[4] = encr - capab;

        crypt_len = frame + *frame_len - encr;
        if (crypt_len < AES_BLOCK_SIZE) {
                mlme_dbg(sdata,
                         "Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM",
                         mgmt->sa);
                return -EINVAL;
        }
        res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
                              encr, crypt_len, 5, addr, len, encr);
        if (res != 0) {
                mlme_dbg(sdata,
                         "AES-SIV decryption of (Re)Association Response frame from %pM failed",
                         mgmt->sa);
                return res;
        }
        *frame_len -= AES_BLOCK_SIZE;

        return 0;
}