spu2.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright 2016 Broadcom
  4. */
  5. /*
  6. * This file works with the SPU2 version of the SPU. SPU2 has different message
  7. * formats than the previous version of the SPU. All SPU message format
  8. * differences should be hidden in the spu2.c,h files.
  9. */
  10. #include <linux/kernel.h>
  11. #include <linux/string.h>
  12. #include "util.h"
  13. #include "spu.h"
  14. #include "spu2.h"
  15. #define SPU2_TX_STATUS_LEN 0 /* SPU2 has no STATUS in input packet */
  16. /*
  17. * Controlled by pkt_stat_cnt field in CRYPTO_SS_SPU0_CORE_SPU2_CONTROL0
  18. * register. Defaults to 2.
  19. */
  20. #define SPU2_RX_STATUS_LEN 2
/*
 * SPU2 protocol selector values. Written into the PROTO_SEL field of FMD
 * ctrl0 (see spu2_fmd_ctrl0_write()); 0 means no protocol offload, the
 * remaining values select MACsec, IPsec, or (D)TLS processing.
 */
enum spu2_proto_sel {
	SPU2_PROTO_RESV = 0,
	SPU2_MACSEC_SECTAG8_ECB = 1,
	SPU2_MACSEC_SECTAG8_SCB = 2,
	SPU2_MACSEC_SECTAG16 = 3,
	SPU2_MACSEC_SECTAG16_8_XPN = 4,
	SPU2_IPSEC = 5,
	SPU2_IPSEC_ESN = 6,
	SPU2_TLS_CIPHER = 7,
	SPU2_TLS_AEAD = 8,
	SPU2_DTLS_CIPHER = 9,
	SPU2_DTLS_AEAD = 10
};
/*
 * Printable names for the SPU2 cipher/hash type and mode encodings, indexed
 * by the corresponding enum spu2_* value. Used only by the spu2_*_name()
 * debug helpers below, which bounds-check the index against the *_LAST
 * enumerator before dereferencing.
 */
static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
	"DES", "3DES"
};

static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
	"XTS", "CCM", "GCM"
};

static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
	"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
	"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
	"SHA3-384", "SHA3-512"
};

static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
	"Rabin", "CCM", "GCM", "Reserved"
};
  48. static char *spu2_ciph_type_name(enum spu2_cipher_type cipher_type)
  49. {
  50. if (cipher_type >= SPU2_CIPHER_TYPE_LAST)
  51. return "Reserved";
  52. return spu2_cipher_type_names[cipher_type];
  53. }
  54. static char *spu2_ciph_mode_name(enum spu2_cipher_mode cipher_mode)
  55. {
  56. if (cipher_mode >= SPU2_CIPHER_MODE_LAST)
  57. return "Reserved";
  58. return spu2_cipher_mode_names[cipher_mode];
  59. }
  60. static char *spu2_hash_type_name(enum spu2_hash_type hash_type)
  61. {
  62. if (hash_type >= SPU2_HASH_TYPE_LAST)
  63. return "Reserved";
  64. return spu2_hash_type_names[hash_type];
  65. }
  66. static char *spu2_hash_mode_name(enum spu2_hash_mode hash_mode)
  67. {
  68. if (hash_mode >= SPU2_HASH_MODE_LAST)
  69. return "Reserved";
  70. return spu2_hash_mode_names[hash_mode];
  71. }
  72. /*
  73. * Convert from a software cipher mode value to the corresponding value
  74. * for SPU2.
  75. */
  76. static int spu2_cipher_mode_xlate(enum spu_cipher_mode cipher_mode,
  77. enum spu2_cipher_mode *spu2_mode)
  78. {
  79. switch (cipher_mode) {
  80. case CIPHER_MODE_ECB:
  81. *spu2_mode = SPU2_CIPHER_MODE_ECB;
  82. break;
  83. case CIPHER_MODE_CBC:
  84. *spu2_mode = SPU2_CIPHER_MODE_CBC;
  85. break;
  86. case CIPHER_MODE_OFB:
  87. *spu2_mode = SPU2_CIPHER_MODE_OFB;
  88. break;
  89. case CIPHER_MODE_CFB:
  90. *spu2_mode = SPU2_CIPHER_MODE_CFB;
  91. break;
  92. case CIPHER_MODE_CTR:
  93. *spu2_mode = SPU2_CIPHER_MODE_CTR;
  94. break;
  95. case CIPHER_MODE_CCM:
  96. *spu2_mode = SPU2_CIPHER_MODE_CCM;
  97. break;
  98. case CIPHER_MODE_GCM:
  99. *spu2_mode = SPU2_CIPHER_MODE_GCM;
  100. break;
  101. case CIPHER_MODE_XTS:
  102. *spu2_mode = SPU2_CIPHER_MODE_XTS;
  103. break;
  104. default:
  105. return -EINVAL;
  106. }
  107. return 0;
  108. }
/**
 * spu2_cipher_xlate() - Convert a cipher {alg/mode/type} triple to a SPU2
 * cipher type and mode.
 * @cipher_alg:  [in] cipher algorithm value from software enumeration
 * @cipher_mode: [in] cipher mode value from software enumeration
 * @cipher_type: [in] cipher type value from software enumeration
 * @spu2_type:   [out] cipher type value used by spu2 hardware
 * @spu2_mode:   [out] cipher mode value used by spu2 hardware
 *
 * Return: 0 if successful
 */
static int spu2_cipher_xlate(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     enum spu_cipher_type cipher_type,
			     enum spu2_cipher_type *spu2_type,
			     enum spu2_cipher_mode *spu2_mode)
{
	int err;

	/* Translate the mode first; fail fast if SPU2 has no equivalent. */
	err = spu2_cipher_mode_xlate(cipher_mode, spu2_mode);
	if (err) {
		flow_log("Invalid cipher mode %d\n", cipher_mode);
		return err;
	}
	switch (cipher_alg) {
	case CIPHER_ALG_NONE:
		*spu2_type = SPU2_CIPHER_TYPE_NONE;
		break;
	case CIPHER_ALG_RC4:
		/* SPU2 does not support RC4 */
		err = -EINVAL;
		*spu2_type = SPU2_CIPHER_TYPE_NONE;
		break;
	case CIPHER_ALG_DES:
		*spu2_type = SPU2_CIPHER_TYPE_DES;
		break;
	case CIPHER_ALG_3DES:
		*spu2_type = SPU2_CIPHER_TYPE_3DES;
		break;
	case CIPHER_ALG_AES:
		/* For AES, the hardware type also encodes the key size. */
		switch (cipher_type) {
		case CIPHER_TYPE_AES128:
			*spu2_type = SPU2_CIPHER_TYPE_AES128;
			break;
		case CIPHER_TYPE_AES192:
			*spu2_type = SPU2_CIPHER_TYPE_AES192;
			break;
		case CIPHER_TYPE_AES256:
			*spu2_type = SPU2_CIPHER_TYPE_AES256;
			break;
		default:
			err = -EINVAL;
		}
		break;
	case CIPHER_ALG_LAST:
	default:
		err = -EINVAL;
		break;
	}
	if (err)
		flow_log("Invalid cipher alg %d or type %d\n",
			 cipher_alg, cipher_type);
	return err;
}
  172. /*
  173. * Convert from a software hash mode value to the corresponding value
  174. * for SPU2. Note that HASH_MODE_NONE and HASH_MODE_XCBC have the same value.
  175. */
  176. static int spu2_hash_mode_xlate(enum hash_mode hash_mode,
  177. enum spu2_hash_mode *spu2_mode)
  178. {
  179. switch (hash_mode) {
  180. case HASH_MODE_XCBC:
  181. *spu2_mode = SPU2_HASH_MODE_XCBC_MAC;
  182. break;
  183. case HASH_MODE_CMAC:
  184. *spu2_mode = SPU2_HASH_MODE_CMAC;
  185. break;
  186. case HASH_MODE_HMAC:
  187. *spu2_mode = SPU2_HASH_MODE_HMAC;
  188. break;
  189. case HASH_MODE_CCM:
  190. *spu2_mode = SPU2_HASH_MODE_CCM;
  191. break;
  192. case HASH_MODE_GCM:
  193. *spu2_mode = SPU2_HASH_MODE_GCM;
  194. break;
  195. default:
  196. return -EINVAL;
  197. }
  198. return 0;
  199. }
/**
 * spu2_hash_xlate() - Convert a hash {alg/mode/type} triple to a SPU2 hash type
 * and mode.
 * @hash_alg:  [in] hash algorithm value from software enumeration
 * @hash_mode: [in] hash mode value from software enumeration
 * @hash_type: [in] hash type value from software enumeration
 * @ciph_type: [in] cipher type value from software enumeration
 * @spu2_type: [out] hash type value used by SPU2 hardware
 * @spu2_mode: [out] hash mode value used by SPU2 hardware
 *
 * Return: 0 if successful
 */
static int
spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
		enum hash_type hash_type, enum spu_cipher_type ciph_type,
		enum spu2_hash_type *spu2_type, enum spu2_hash_mode *spu2_mode)
{
	int err;

	/* Translate the mode first; fail fast if SPU2 has no equivalent. */
	err = spu2_hash_mode_xlate(hash_mode, spu2_mode);
	if (err) {
		flow_log("Invalid hash mode %d\n", hash_mode);
		return err;
	}
	switch (hash_alg) {
	case HASH_ALG_NONE:
		*spu2_type = SPU2_HASH_TYPE_NONE;
		break;
	case HASH_ALG_MD5:
		*spu2_type = SPU2_HASH_TYPE_MD5;
		break;
	case HASH_ALG_SHA1:
		*spu2_type = SPU2_HASH_TYPE_SHA1;
		break;
	case HASH_ALG_SHA224:
		*spu2_type = SPU2_HASH_TYPE_SHA224;
		break;
	case HASH_ALG_SHA256:
		*spu2_type = SPU2_HASH_TYPE_SHA256;
		break;
	case HASH_ALG_SHA384:
		*spu2_type = SPU2_HASH_TYPE_SHA384;
		break;
	case HASH_ALG_SHA512:
		*spu2_type = SPU2_HASH_TYPE_SHA512;
		break;
	case HASH_ALG_AES:
		/*
		 * For AES-based MACs (e.g. GCM/CCM), the hash type is
		 * selected by the cipher key size.
		 */
		switch (ciph_type) {
		case CIPHER_TYPE_AES128:
			*spu2_type = SPU2_HASH_TYPE_AES128;
			break;
		case CIPHER_TYPE_AES192:
			*spu2_type = SPU2_HASH_TYPE_AES192;
			break;
		case CIPHER_TYPE_AES256:
			*spu2_type = SPU2_HASH_TYPE_AES256;
			break;
		default:
			err = -EINVAL;
		}
		break;
	case HASH_ALG_SHA3_224:
		*spu2_type = SPU2_HASH_TYPE_SHA3_224;
		break;
	case HASH_ALG_SHA3_256:
		*spu2_type = SPU2_HASH_TYPE_SHA3_256;
		break;
	case HASH_ALG_SHA3_384:
		*spu2_type = SPU2_HASH_TYPE_SHA3_384;
		break;
	case HASH_ALG_SHA3_512:
		*spu2_type = SPU2_HASH_TYPE_SHA3_512;
		break;
	case HASH_ALG_LAST:
	default:
		err = -EINVAL;
		break;
	}
	if (err)
		flow_log("Invalid hash alg %d or type %d\n",
			 hash_alg, hash_type);
	return err;
}
/* Dump FMD ctrl0. The ctrl0 input is in host byte order */
static void spu2_dump_fmd_ctrl0(u64 ctrl0)
{
	enum spu2_cipher_type ciph_type;
	enum spu2_cipher_mode ciph_mode;
	enum spu2_hash_type hash_type;
	enum spu2_hash_mode hash_mode;
	char *ciph_name;
	char *ciph_mode_name;
	char *hash_name;
	char *hash_mode_name;
	u8 cfb;
	u8 proto;

	packet_log(" FMD CTRL0 %#16llx\n", ctrl0);
	if (ctrl0 & SPU2_CIPH_ENCRYPT_EN)
		packet_log(" encrypt\n");
	else
		packet_log(" decrypt\n");
	/* Extract each ctrl0 field with its mask, then shift into place. */
	ciph_type = (ctrl0 & SPU2_CIPH_TYPE) >> SPU2_CIPH_TYPE_SHIFT;
	ciph_name = spu2_ciph_type_name(ciph_type);
	packet_log(" Cipher type: %s\n", ciph_name);
	/* Cipher mode is only meaningful when a cipher is selected. */
	if (ciph_type != SPU2_CIPHER_TYPE_NONE) {
		ciph_mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT;
		ciph_mode_name = spu2_ciph_mode_name(ciph_mode);
		packet_log(" Cipher mode: %s\n", ciph_mode_name);
	}
	cfb = (ctrl0 & SPU2_CFB_MASK) >> SPU2_CFB_MASK_SHIFT;
	packet_log(" CFB %#x\n", cfb);
	proto = (ctrl0 & SPU2_PROTO_SEL) >> SPU2_PROTO_SEL_SHIFT;
	packet_log(" protocol %#x\n", proto);
	if (ctrl0 & SPU2_HASH_FIRST)
		packet_log(" hash first\n");
	else
		packet_log(" cipher first\n");
	if (ctrl0 & SPU2_CHK_TAG)
		packet_log(" check tag\n");
	hash_type = (ctrl0 & SPU2_HASH_TYPE) >> SPU2_HASH_TYPE_SHIFT;
	hash_name = spu2_hash_type_name(hash_type);
	packet_log(" Hash type: %s\n", hash_name);
	/* Hash mode is only meaningful when a hash is selected. */
	if (hash_type != SPU2_HASH_TYPE_NONE) {
		hash_mode = (ctrl0 & SPU2_HASH_MODE) >> SPU2_HASH_MODE_SHIFT;
		hash_mode_name = spu2_hash_mode_name(hash_mode);
		packet_log(" Hash mode: %s\n", hash_mode_name);
	}
	if (ctrl0 & SPU2_CIPH_PAD_EN) {
		packet_log(" Cipher pad: %#2llx\n",
			   (ctrl0 & SPU2_CIPH_PAD) >> SPU2_CIPH_PAD_SHIFT);
	}
}
/* Dump FMD ctrl1. The ctrl1 input is in host byte order */
static void spu2_dump_fmd_ctrl1(u64 ctrl1)
{
	u8 hash_key_len;
	u8 ciph_key_len;
	u8 ret_iv_len;
	u8 iv_offset;
	u8 iv_len;
	u8 hash_tag_len;
	u8 ret_md;

	packet_log(" FMD CTRL1 %#16llx\n", ctrl1);
	if (ctrl1 & SPU2_TAG_LOC)
		packet_log(" Tag after payload\n");
	/* List which optional fields are present in the request message. */
	packet_log(" Msg includes ");
	if (ctrl1 & SPU2_HAS_FR_DATA)
		packet_log("FD ");
	if (ctrl1 & SPU2_HAS_AAD1)
		packet_log("AAD1 ");
	if (ctrl1 & SPU2_HAS_NAAD)
		packet_log("NAAD ");
	if (ctrl1 & SPU2_HAS_AAD2)
		packet_log("AAD2 ");
	if (ctrl1 & SPU2_HAS_ESN)
		packet_log("ESN ");
	packet_log("\n");
	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
	packet_log(" Hash key len %u\n", hash_key_len);
	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
	packet_log(" Cipher key len %u\n", ciph_key_len);
	if (ctrl1 & SPU2_GENIV)
		packet_log(" Generate IV\n");
	if (ctrl1 & SPU2_HASH_IV)
		packet_log(" IV included in hash\n");
	if (ctrl1 & SPU2_RET_IV)
		packet_log(" Return IV in output before payload\n");
	ret_iv_len = (ctrl1 & SPU2_RET_IV_LEN) >> SPU2_RET_IV_LEN_SHIFT;
	/* A returned-IV length field of 0 means the full 16 bytes. */
	packet_log(" Length of returned IV %u bytes\n",
		   ret_iv_len ? ret_iv_len : 16);
	iv_offset = (ctrl1 & SPU2_IV_OFFSET) >> SPU2_IV_OFFSET_SHIFT;
	packet_log(" IV offset %u\n", iv_offset);
	iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
	packet_log(" Input IV len %u bytes\n", iv_len);
	hash_tag_len = (ctrl1 & SPU2_HASH_TAG_LEN) >> SPU2_HASH_TAG_LEN_SHIFT;
	packet_log(" Hash tag length %u bytes\n", hash_tag_len);
	/* List which fields the SPU is asked to return in its response. */
	packet_log(" Return ");
	ret_md = (ctrl1 & SPU2_RETURN_MD) >> SPU2_RETURN_MD_SHIFT;
	if (ret_md)
		packet_log("FMD ");
	if (ret_md == SPU2_RET_FMD_OMD)
		packet_log("OMD ");
	else if (ret_md == SPU2_RET_FMD_OMD_IV)
		packet_log("OMD IV ");
	if (ctrl1 & SPU2_RETURN_FD)
		packet_log("FD ");
	if (ctrl1 & SPU2_RETURN_AAD1)
		packet_log("AAD1 ");
	if (ctrl1 & SPU2_RETURN_NAAD)
		packet_log("NAAD ");
	if (ctrl1 & SPU2_RETURN_AAD2)
		packet_log("AAD2 ");
	if (ctrl1 & SPU2_RETURN_PAY)
		packet_log("Payload");
	packet_log("\n");
}
  395. /* Dump FMD ctrl2. The ctrl2 input is in host byte order */
  396. static void spu2_dump_fmd_ctrl2(u64 ctrl2)
  397. {
  398. packet_log(" FMD CTRL2 %#16llx\n", ctrl2);
  399. packet_log(" AAD1 offset %llu length %llu bytes\n",
  400. ctrl2 & SPU2_AAD1_OFFSET,
  401. (ctrl2 & SPU2_AAD1_LEN) >> SPU2_AAD1_LEN_SHIFT);
  402. packet_log(" AAD2 offset %llu\n",
  403. (ctrl2 & SPU2_AAD2_OFFSET) >> SPU2_AAD2_OFFSET_SHIFT);
  404. packet_log(" Payload offset %llu\n",
  405. (ctrl2 & SPU2_PL_OFFSET) >> SPU2_PL_OFFSET_SHIFT);
  406. }
  407. /* Dump FMD ctrl3. The ctrl3 input is in host byte order */
  408. static void spu2_dump_fmd_ctrl3(u64 ctrl3)
  409. {
  410. packet_log(" FMD CTRL3 %#16llx\n", ctrl3);
  411. packet_log(" Payload length %llu bytes\n", ctrl3 & SPU2_PL_LEN);
  412. packet_log(" TLS length %llu bytes\n",
  413. (ctrl3 & SPU2_TLS_LEN) >> SPU2_TLS_LEN_SHIFT);
  414. }
/*
 * Dump all four FMD control words of a SPU2 message header. The FMD is
 * stored little-endian in the message, so convert each word to host byte
 * order before decoding.
 */
static void spu2_dump_fmd(struct SPU2_FMD *fmd)
{
	spu2_dump_fmd_ctrl0(le64_to_cpu(fmd->ctrl0));
	spu2_dump_fmd_ctrl1(le64_to_cpu(fmd->ctrl1));
	spu2_dump_fmd_ctrl2(le64_to_cpu(fmd->ctrl2));
	spu2_dump_fmd_ctrl3(le64_to_cpu(fmd->ctrl3));
}
  422. static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
  423. u16 hash_iv_len, u16 ciph_iv_len)
  424. {
  425. u8 *ptr = omd;
  426. packet_log(" OMD:\n");
  427. if (hash_key_len) {
  428. packet_log(" Hash Key Length %u bytes\n", hash_key_len);
  429. packet_dump(" KEY: ", ptr, hash_key_len);
  430. ptr += hash_key_len;
  431. }
  432. if (ciph_key_len) {
  433. packet_log(" Cipher Key Length %u bytes\n", ciph_key_len);
  434. packet_dump(" KEY: ", ptr, ciph_key_len);
  435. ptr += ciph_key_len;
  436. }
  437. if (hash_iv_len) {
  438. packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
  439. packet_dump(" hash IV: ", ptr, hash_iv_len);
  440. ptr += ciph_key_len;
  441. }
  442. if (ciph_iv_len) {
  443. packet_log(" Cipher IV Length %u bytes\n", ciph_iv_len);
  444. packet_dump(" cipher IV: ", ptr, ciph_iv_len);
  445. }
  446. }
/* Dump a SPU2 header for debug */
void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
	struct SPU2_FMD *fmd = (struct SPU2_FMD *)buf;
	u8 *omd;
	u64 ctrl1;
	u16 hash_key_len;
	u16 ciph_key_len;
	u16 hash_iv_len;
	u16 ciph_iv_len;
	u16 omd_len;

	packet_log("\n");
	packet_log("SPU2 message header %p len: %u\n", buf, buf_len);
	spu2_dump_fmd(fmd);
	/* OMD, when present, immediately follows the fixed metadata. */
	omd = (u8 *)(fmd + 1);
	/* Recover the OMD field lengths from the ctrl1 word. */
	ctrl1 = le64_to_cpu(fmd->ctrl1);
	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
	/* ctrl1 has no hash IV length field; this driver never sends one. */
	hash_iv_len = 0;
	ciph_iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
	spu2_dump_omd(omd, hash_key_len, ciph_key_len, hash_iv_len,
		      ciph_iv_len);

	/* Double check sanity */
	omd_len = hash_key_len + ciph_key_len + hash_iv_len + ciph_iv_len;
	if (FMD_SIZE + omd_len != buf_len) {
		packet_log
		    (" Packet parsed incorrectly. buf_len %u, sum of MD %zu\n",
		     buf_len, FMD_SIZE + omd_len);
	}
	packet_log("\n");
}
/**
 * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
 * subsequent skcipher requests for this context.
 * @fmd:            Start of FMD field to be written
 * @spu2_type:      Cipher algorithm
 * @spu2_mode:      Cipher mode
 * @cipher_key_len: Length of cipher key, in bytes
 * @cipher_iv_len:  Length of cipher initialization vector, in bytes
 *
 * Return: 0 (success)
 */
static int spu2_fmd_init(struct SPU2_FMD *fmd,
			 enum spu2_cipher_type spu2_type,
			 enum spu2_cipher_mode spu2_mode,
			 u32 cipher_key_len, u32 cipher_iv_len)
{
	u64 ctrl0;
	u64 ctrl1;
	u64 ctrl2;
	u64 ctrl3;
	u32 aad1_offset;
	u32 aad2_offset;
	u16 aad1_len = 0;
	u64 payload_offset;

	ctrl0 = (spu2_type << SPU2_CIPH_TYPE_SHIFT) |
	    (spu2_mode << SPU2_CIPH_MODE_SHIFT);

	/* Request only FMD back in the response, plus the payload. */
	ctrl1 = (cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) |
	    ((u64)cipher_iv_len << SPU2_IV_LEN_SHIFT) |
	    ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT) | SPU2_RETURN_PAY;

	/*
	 * AAD1 offset is from start of FD. FD length is always 0 for this
	 * driver. So AAD1_offset is always 0.
	 */
	aad1_offset = 0;
	aad2_offset = aad1_offset;
	payload_offset = 0;
	ctrl2 = aad1_offset |
	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
	    (payload_offset << SPU2_PL_OFFSET_SHIFT);

	/* Payload length is filled in per-request; 0 at setkey time. */
	ctrl3 = 0;

	/* FMD is stored little-endian in the message. */
	fmd->ctrl0 = cpu_to_le64(ctrl0);
	fmd->ctrl1 = cpu_to_le64(ctrl1);
	fmd->ctrl2 = cpu_to_le64(ctrl2);
	fmd->ctrl3 = cpu_to_le64(ctrl3);

	return 0;
}
/**
 * spu2_fmd_ctrl0_write() - Write ctrl0 field in fixed metadata (FMD) field of
 * SPU request packet.
 * @fmd:         Start of FMD field to be written
 * @is_inbound:  true if decrypting. false if encrypting.
 * @auth_first:  true if alg authenticates before encrypting
 * @protocol:    protocol selector
 * @cipher_type: cipher algorithm
 * @cipher_mode: cipher mode
 * @auth_type:   authentication type
 * @auth_mode:   authentication mode
 */
static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
				 bool is_inbound, bool auth_first,
				 enum spu2_proto_sel protocol,
				 enum spu2_cipher_type cipher_type,
				 enum spu2_cipher_mode cipher_mode,
				 enum spu2_hash_type auth_type,
				 enum spu2_hash_mode auth_mode)
{
	u64 ctrl0 = 0;

	/* Enable encryption only when a cipher is in use and outbound. */
	if ((cipher_type != SPU2_CIPHER_TYPE_NONE) && !is_inbound)
		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;

	ctrl0 |= ((u64)cipher_type << SPU2_CIPH_TYPE_SHIFT) |
	    ((u64)cipher_mode << SPU2_CIPH_MODE_SHIFT);

	if (protocol)
		ctrl0 |= (u64)protocol << SPU2_PROTO_SEL_SHIFT;

	if (auth_first)
		ctrl0 |= SPU2_HASH_FIRST;

	/* On decrypt with authentication, have the hw verify the tag. */
	if (is_inbound && (auth_type != SPU2_HASH_TYPE_NONE))
		ctrl0 |= SPU2_CHK_TAG;

	ctrl0 |= (((u64)auth_type << SPU2_HASH_TYPE_SHIFT) |
		  ((u64)auth_mode << SPU2_HASH_MODE_SHIFT));

	fmd->ctrl0 = cpu_to_le64(ctrl0);
}
/**
 * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
 * SPU request packet.
 * @fmd:            Start of FMD field to be written
 * @is_inbound:     true if decrypting. false if encrypting.
 * @assoc_size:     Length of additional associated data, in bytes
 * @auth_key_len:   Length of authentication key, in bytes
 * @cipher_key_len: Length of cipher key, in bytes
 * @gen_iv:         If true, hw generates IV and returns in response
 * @hash_iv:        IV participates in hash. Used for IPSEC and TLS.
 * @return_iv:      Return IV in output packet before payload
 * @ret_iv_len:     Length of IV returned from SPU, in bytes
 * @ret_iv_offset:  Offset into full IV of start of returned IV
 * @cipher_iv_len:  Length of input cipher IV, in bytes
 * @digest_size:    Length of digest (aka, hash tag or ICV), in bytes
 * @return_payload: Return payload in SPU response
 * @return_md :     return metadata in SPU response
 *
 * Packet can have AAD2 w/o AAD1. For algorithms currently supported,
 * associated data goes in AAD2.
 */
static void spu2_fmd_ctrl1_write(struct SPU2_FMD *fmd, bool is_inbound,
				 u64 assoc_size,
				 u64 auth_key_len, u64 cipher_key_len,
				 bool gen_iv, bool hash_iv, bool return_iv,
				 u64 ret_iv_len, u64 ret_iv_offset,
				 u64 cipher_iv_len, u64 digest_size,
				 bool return_payload, bool return_md)
{
	u64 ctrl1 = 0;

	/* On inbound authenticated ops, the tag follows the payload. */
	if (is_inbound && digest_size)
		ctrl1 |= SPU2_TAG_LOC;

	if (assoc_size) {
		ctrl1 |= SPU2_HAS_AAD2;
		ctrl1 |= SPU2_RETURN_AAD2;  /* need aad2 for gcm aes esp */
	}

	if (auth_key_len)
		ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) &
			  SPU2_HASH_KEY_LEN);

	if (cipher_key_len)
		ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) &
			  SPU2_CIPH_KEY_LEN);

	if (gen_iv)
		ctrl1 |= SPU2_GENIV;

	if (hash_iv)
		ctrl1 |= SPU2_HASH_IV;

	if (return_iv) {
		ctrl1 |= SPU2_RET_IV;
		ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT;
		ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT;
	}

	ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN);

	if (digest_size)
		ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) &
			  SPU2_HASH_TAG_LEN);

	/* Let's ask for the output pkt to include FMD, but don't need to
	 * get keys and IVs back in OMD.
	 */
	if (return_md)
		ctrl1 |= ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT);
	else
		ctrl1 |= ((u64)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT);

	/* Crypto API does not get assoc data back. So no need for AAD2. */

	if (return_payload)
		ctrl1 |= SPU2_RETURN_PAY;

	fmd->ctrl1 = cpu_to_le64(ctrl1);
}
/**
 * spu2_fmd_ctrl2_write() - Set the ctrl2 field in the fixed metadata field of
 * SPU2 header.
 * @fmd:            Start of FMD field to be written
 * @cipher_offset:  Number of bytes from Start of Packet (end of FD field) where
 *                  data to be encrypted or decrypted begins
 * @auth_key_len:   Length of authentication key, in bytes (unused here)
 * @auth_iv_len:    Length of authentication initialization vector, in bytes
 *                  (unused here)
 * @cipher_key_len: Length of cipher key, in bytes (unused here)
 * @cipher_iv_len:  Length of cipher IV, in bytes (unused here)
 *
 * NOTE(review): only @cipher_offset affects the ctrl2 value; the key/IV
 * length parameters are accepted but never read in this function.
 */
static void spu2_fmd_ctrl2_write(struct SPU2_FMD *fmd, u64 cipher_offset,
				 u64 auth_key_len, u64 auth_iv_len,
				 u64 cipher_key_len, u64 cipher_iv_len)
{
	u64 ctrl2;
	u64 aad1_offset;
	u64 aad2_offset;
	u16 aad1_len = 0;
	u64 payload_offset;

	/* AAD1 offset is from start of FD. FD length always 0. */
	aad1_offset = 0;

	aad2_offset = aad1_offset;
	payload_offset = cipher_offset;
	ctrl2 = aad1_offset |
	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
	    (payload_offset << SPU2_PL_OFFSET_SHIFT);

	fmd->ctrl2 = cpu_to_le64(ctrl2);
}
  657. /**
  658. * spu2_fmd_ctrl3_write() - Set the ctrl3 field in FMD
  659. * @fmd: Fixed meta data. First field in SPU2 msg header.
  660. * @payload_len: Length of payload, in bytes
  661. */
  662. static void spu2_fmd_ctrl3_write(struct SPU2_FMD *fmd, u64 payload_len)
  663. {
  664. u64 ctrl3;
  665. ctrl3 = payload_len & SPU2_PL_LEN;
  666. fmd->ctrl3 = cpu_to_le64(ctrl3);
  667. }
  668. /**
  669. * spu2_ctx_max_payload() - Determine the maximum length of the payload for a
  670. * SPU message for a given cipher and hash alg context.
  671. * @cipher_alg: The cipher algorithm
  672. * @cipher_mode: The cipher mode
  673. * @blocksize: The size of a block of data for this algo
  674. *
  675. * For SPU2, the hardware generally ignores the PayloadLen field in ctrl3 of
  676. * FMD and just keeps computing until it receives a DMA descriptor with the EOF
  677. * flag set. So we consider the max payload to be infinite. AES CCM is an
  678. * exception.
  679. *
  680. * Return: Max payload length in bytes
  681. */
  682. u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
  683. enum spu_cipher_mode cipher_mode,
  684. unsigned int blocksize)
  685. {
  686. if ((cipher_alg == CIPHER_ALG_AES) &&
  687. (cipher_mode == CIPHER_MODE_CCM)) {
  688. u32 excess = SPU2_MAX_PAYLOAD % blocksize;
  689. return SPU2_MAX_PAYLOAD - excess;
  690. } else {
  691. return SPU_MAX_PAYLOAD_INF;
  692. }
  693. }
  694. /**
  695. * spu2_payload_length() - Given a SPU2 message header, extract the payload
  696. * length.
  697. * @spu_hdr: Start of SPU message header (FMD)
  698. *
  699. * Return: payload length, in bytes
  700. */
  701. u32 spu2_payload_length(u8 *spu_hdr)
  702. {
  703. struct SPU2_FMD *fmd = (struct SPU2_FMD *)spu_hdr;
  704. u32 pl_len;
  705. u64 ctrl3;
  706. ctrl3 = le64_to_cpu(fmd->ctrl3);
  707. pl_len = ctrl3 & SPU2_PL_LEN;
  708. return pl_len;
  709. }
/**
 * spu2_response_hdr_len() - Determine the expected length of a SPU response
 * header.
 * @auth_key_len: Length of authentication key, in bytes (unused)
 * @enc_key_len:  Length of encryption key, in bytes (unused)
 * @is_hash:      Unused
 *
 * For SPU2, includes just FMD. OMD is never requested.
 *
 * Return: Length of FMD, in bytes
 */
u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
	return FMD_SIZE;
}
/**
 * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg:        hash algorithm (unused)
 * @hash_mode:       hash mode (unused)
 * @chunksize:       length of data, in bytes (unused)
 * @hash_block_size: size of a hash block, in bytes (unused)
 *
 * SPU2 hardware does all hash padding
 *
 * Return: length of hash pad in bytes (always 0 for SPU2)
 */
u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
		      u32 chunksize, u16 hash_block_size)
{
	return 0;
}
/**
 * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
 * the AAD field or the data.
 * @cipher_mode: Unused
 * @data_size:   Unused
 *
 * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
 */
u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
			 unsigned int data_size)
{
	return 0;
}
  755. /**
  756. * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
  757. * associated data in a SPU2 output packet.
  758. * @cipher_mode: cipher mode
  759. * @assoc_len: length of additional associated data, in bytes
  760. * @iv_len: length of initialization vector, in bytes
  761. * @is_encrypt: true if encrypting. false if decrypt.
  762. *
  763. * Return: Length of buffer to catch associated data in response
  764. */
  765. u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
  766. unsigned int assoc_len, unsigned int iv_len,
  767. bool is_encrypt)
  768. {
  769. u32 resp_len = assoc_len;
  770. if (is_encrypt)
  771. /* gcm aes esp has to write 8-byte IV in response */
  772. resp_len += iv_len;
  773. return resp_len;
  774. }
  775. /**
  776. * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
  777. * in a SPU request after the AAD and before the payload.
  778. * @cipher_mode: cipher mode
  779. * @iv_len: initialization vector length in bytes
  780. *
  781. * For SPU2, AEAD IV is included in OMD and does not need to be repeated
  782. * prior to the payload.
  783. *
  784. * Return: Length of AEAD IV in bytes
  785. */
  786. u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
  787. {
  788. return 0;
  789. }
  790. /**
  791. * spu2_hash_type() - Determine the type of hash operation.
  792. * @src_sent: The number of bytes in the current request that have already
  793. * been sent to the SPU to be hashed.
  794. *
  795. * SPU2 always does a FULL hash operation
  796. */
  797. enum hash_type spu2_hash_type(u32 src_sent)
  798. {
  799. return HASH_TYPE_FULL;
  800. }
  801. /**
  802. * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
  803. * return.
  804. * @alg_digest_size: Number of bytes in the final digest for the given algo
  805. * @alg: The hash algorithm
  806. * @htype: Type of hash operation (init, update, full, etc)
  807. *
  808. */
  809. u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
  810. enum hash_type htype)
  811. {
  812. return alg_digest_size;
  813. }
  814. /**
  815. * spu2_create_request() - Build a SPU2 request message header, includint FMD and
  816. * OMD.
  817. * @spu_hdr: Start of buffer where SPU request header is to be written
  818. * @req_opts: SPU request message options
  819. * @cipher_parms: Parameters related to cipher algorithm
  820. * @hash_parms: Parameters related to hash algorithm
  821. * @aead_parms: Parameters related to AEAD operation
  822. * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
  823. * not include length of AAD.
  824. *
  825. * Construct the message starting at spu_hdr. Caller should allocate this buffer
  826. * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
  827. *
  828. * Return: the length of the SPU header in bytes. 0 if an error occurs.
  829. */
  830. u32 spu2_create_request(u8 *spu_hdr,
  831. struct spu_request_opts *req_opts,
  832. struct spu_cipher_parms *cipher_parms,
  833. struct spu_hash_parms *hash_parms,
  834. struct spu_aead_parms *aead_parms,
  835. unsigned int data_size)
  836. {
  837. struct SPU2_FMD *fmd;
  838. u8 *ptr;
  839. unsigned int buf_len;
  840. int err;
  841. enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
  842. enum spu2_cipher_mode spu2_ciph_mode;
  843. enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
  844. enum spu2_hash_mode spu2_auth_mode;
  845. bool return_md = true;
  846. enum spu2_proto_sel proto = SPU2_PROTO_RESV;
  847. /* size of the payload */
  848. unsigned int payload_len =
  849. hash_parms->prebuf_len + data_size + hash_parms->pad_len -
  850. ((req_opts->is_aead && req_opts->is_inbound) ?
  851. hash_parms->digestsize : 0);
  852. /* offset of prebuf or data from start of AAD2 */
  853. unsigned int cipher_offset = aead_parms->assoc_size +
  854. aead_parms->aad_pad_len + aead_parms->iv_len;
  855. /* total size of the data following OMD (without STAT word padding) */
  856. unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
  857. aead_parms->iv_len,
  858. hash_parms->prebuf_len,
  859. data_size,
  860. aead_parms->aad_pad_len,
  861. aead_parms->data_pad_len,
  862. hash_parms->pad_len);
  863. unsigned int assoc_size = aead_parms->assoc_size;
  864. if (req_opts->is_aead &&
  865. (cipher_parms->alg == CIPHER_ALG_AES) &&
  866. (cipher_parms->mode == CIPHER_MODE_GCM))
  867. /*
  868. * On SPU 2, aes gcm cipher first on encrypt, auth first on
  869. * decrypt
  870. */
  871. req_opts->auth_first = req_opts->is_inbound;
  872. /* and do opposite for ccm (auth 1st on encrypt) */
  873. if (req_opts->is_aead &&
  874. (cipher_parms->alg == CIPHER_ALG_AES) &&
  875. (cipher_parms->mode == CIPHER_MODE_CCM))
  876. req_opts->auth_first = !req_opts->is_inbound;
  877. flow_log("%s()\n", __func__);
  878. flow_log(" in:%u authFirst:%u\n",
  879. req_opts->is_inbound, req_opts->auth_first);
  880. flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
  881. cipher_parms->mode, cipher_parms->type);
  882. flow_log(" is_esp: %s\n", req_opts->is_esp ? "yes" : "no");
  883. flow_log(" key: %d\n", cipher_parms->key_len);
  884. flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
  885. flow_log(" iv: %d\n", cipher_parms->iv_len);
  886. flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
  887. flow_log(" auth alg:%u mode:%u type %u\n",
  888. hash_parms->alg, hash_parms->mode, hash_parms->type);
  889. flow_log(" digestsize: %u\n", hash_parms->digestsize);
  890. flow_log(" authkey: %d\n", hash_parms->key_len);
  891. flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len);
  892. flow_log(" assoc_size:%u\n", assoc_size);
  893. flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len);
  894. flow_log(" data_size:%u\n", data_size);
  895. flow_log(" hash_pad_len:%u\n", hash_parms->pad_len);
  896. flow_log(" real_db_size:%u\n", real_db_size);
  897. flow_log(" cipher_offset:%u payload_len:%u\n",
  898. cipher_offset, payload_len);
  899. flow_log(" aead_iv: %u\n", aead_parms->iv_len);
  900. /* Convert to spu2 values for cipher alg, hash alg */
  901. err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
  902. cipher_parms->type,
  903. &spu2_ciph_type, &spu2_ciph_mode);
  904. /* If we are doing GCM hashing only - either via rfc4543 transform
  905. * or because we happen to do GCM with AAD only and no payload - we
  906. * need to configure hardware to use hash key rather than cipher key
  907. * and put data into payload. This is because unlike SPU-M, running
  908. * GCM cipher with 0 size payload is not permitted.
  909. */
  910. if ((req_opts->is_rfc4543) ||
  911. ((spu2_ciph_mode == SPU2_CIPHER_MODE_GCM) &&
  912. (payload_len == 0))) {
  913. /* Use hashing (only) and set up hash key */
  914. spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
  915. hash_parms->key_len = cipher_parms->key_len;
  916. memcpy(hash_parms->key_buf, cipher_parms->key_buf,
  917. cipher_parms->key_len);
  918. cipher_parms->key_len = 0;
  919. if (req_opts->is_rfc4543)
  920. payload_len += assoc_size;
  921. else
  922. payload_len = assoc_size;
  923. cipher_offset = 0;
  924. assoc_size = 0;
  925. }
  926. if (err)
  927. return 0;
  928. flow_log("spu2 cipher type %s, cipher mode %s\n",
  929. spu2_ciph_type_name(spu2_ciph_type),
  930. spu2_ciph_mode_name(spu2_ciph_mode));
  931. err = spu2_hash_xlate(hash_parms->alg, hash_parms->mode,
  932. hash_parms->type,
  933. cipher_parms->type,
  934. &spu2_auth_type, &spu2_auth_mode);
  935. if (err)
  936. return 0;
  937. flow_log("spu2 hash type %s, hash mode %s\n",
  938. spu2_hash_type_name(spu2_auth_type),
  939. spu2_hash_mode_name(spu2_auth_mode));
  940. fmd = (struct SPU2_FMD *)spu_hdr;
  941. spu2_fmd_ctrl0_write(fmd, req_opts->is_inbound, req_opts->auth_first,
  942. proto, spu2_ciph_type, spu2_ciph_mode,
  943. spu2_auth_type, spu2_auth_mode);
  944. spu2_fmd_ctrl1_write(fmd, req_opts->is_inbound, assoc_size,
  945. hash_parms->key_len, cipher_parms->key_len,
  946. false, false,
  947. aead_parms->return_iv, aead_parms->ret_iv_len,
  948. aead_parms->ret_iv_off,
  949. cipher_parms->iv_len, hash_parms->digestsize,
  950. !req_opts->bd_suppress, return_md);
  951. spu2_fmd_ctrl2_write(fmd, cipher_offset, hash_parms->key_len, 0,
  952. cipher_parms->key_len, cipher_parms->iv_len);
  953. spu2_fmd_ctrl3_write(fmd, payload_len);
  954. ptr = (u8 *)(fmd + 1);
  955. buf_len = sizeof(struct SPU2_FMD);
  956. /* Write OMD */
  957. if (hash_parms->key_len) {
  958. memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
  959. ptr += hash_parms->key_len;
  960. buf_len += hash_parms->key_len;
  961. }
  962. if (cipher_parms->key_len) {
  963. memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len);
  964. ptr += cipher_parms->key_len;
  965. buf_len += cipher_parms->key_len;
  966. }
  967. if (cipher_parms->iv_len) {
  968. memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
  969. ptr += cipher_parms->iv_len;
  970. buf_len += cipher_parms->iv_len;
  971. }
  972. packet_dump(" SPU request header: ", spu_hdr, buf_len);
  973. return buf_len;
  974. }
  975. /**
  976. * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
  977. * including FMD and OMD.
  978. * @spu_hdr: Location of start of SPU request (FMD field)
  979. * @cipher_parms: Parameters describing cipher request
  980. *
  981. * Called at setkey time to initialize a msg header that can be reused for all
  982. * subsequent skcipher requests. Construct the message starting at spu_hdr.
  983. * Caller should allocate this buffer in DMA-able memory at least
  984. * SPU_HEADER_ALLOC_LEN bytes long.
  985. *
  986. * Return: the total length of the SPU header (FMD and OMD) in bytes. 0 if an
  987. * error occurs.
  988. */
  989. u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
  990. {
  991. struct SPU2_FMD *fmd;
  992. u8 *omd;
  993. enum spu2_cipher_type spu2_type = SPU2_CIPHER_TYPE_NONE;
  994. enum spu2_cipher_mode spu2_mode;
  995. int err;
  996. flow_log("%s()\n", __func__);
  997. flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
  998. cipher_parms->mode, cipher_parms->type);
  999. flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
  1000. flow_log(" key: %d\n", cipher_parms->key_len);
  1001. flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
  1002. /* Convert to spu2 values */
  1003. err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
  1004. cipher_parms->type, &spu2_type, &spu2_mode);
  1005. if (err)
  1006. return 0;
  1007. flow_log("spu2 cipher type %s, cipher mode %s\n",
  1008. spu2_ciph_type_name(spu2_type),
  1009. spu2_ciph_mode_name(spu2_mode));
  1010. /* Construct the FMD header */
  1011. fmd = (struct SPU2_FMD *)spu_hdr;
  1012. err = spu2_fmd_init(fmd, spu2_type, spu2_mode, cipher_parms->key_len,
  1013. cipher_parms->iv_len);
  1014. if (err)
  1015. return 0;
  1016. /* Write cipher key to OMD */
  1017. omd = (u8 *)(fmd + 1);
  1018. if (cipher_parms->key_buf && cipher_parms->key_len)
  1019. memcpy(omd, cipher_parms->key_buf, cipher_parms->key_len);
  1020. packet_dump(" SPU request header: ", spu_hdr,
  1021. FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len);
  1022. return FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len;
  1023. }
  1024. /**
  1025. * spu2_cipher_req_finish() - Finish building a SPU request message header for a
  1026. * block cipher request.
  1027. * @spu_hdr: Start of the request message header (MH field)
  1028. * @spu_req_hdr_len: Length in bytes of the SPU request header
  1029. * @is_inbound: 0 encrypt, 1 decrypt
  1030. * @cipher_parms: Parameters describing cipher operation to be performed
  1031. * @data_size: Length of the data in the BD field
  1032. *
  1033. * Assumes much of the header was already filled in at setkey() time in
  1034. * spu_cipher_req_init().
  1035. * spu_cipher_req_init() fills in the encryption key.
  1036. */
  1037. void spu2_cipher_req_finish(u8 *spu_hdr,
  1038. u16 spu_req_hdr_len,
  1039. unsigned int is_inbound,
  1040. struct spu_cipher_parms *cipher_parms,
  1041. unsigned int data_size)
  1042. {
  1043. struct SPU2_FMD *fmd;
  1044. u8 *omd; /* start of optional metadata */
  1045. u64 ctrl0;
  1046. u64 ctrl3;
  1047. flow_log("%s()\n", __func__);
  1048. flow_log(" in: %u\n", is_inbound);
  1049. flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
  1050. cipher_parms->type);
  1051. flow_log(" iv len: %d\n", cipher_parms->iv_len);
  1052. flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
  1053. flow_log(" data_size: %u\n", data_size);
  1054. fmd = (struct SPU2_FMD *)spu_hdr;
  1055. omd = (u8 *)(fmd + 1);
  1056. /*
  1057. * FMD ctrl0 was initialized at setkey time. update it to indicate
  1058. * whether we are encrypting or decrypting.
  1059. */
  1060. ctrl0 = le64_to_cpu(fmd->ctrl0);
  1061. if (is_inbound)
  1062. ctrl0 &= ~SPU2_CIPH_ENCRYPT_EN; /* decrypt */
  1063. else
  1064. ctrl0 |= SPU2_CIPH_ENCRYPT_EN; /* encrypt */
  1065. fmd->ctrl0 = cpu_to_le64(ctrl0);
  1066. if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) {
  1067. /* cipher iv provided so put it in here */
  1068. memcpy(omd + cipher_parms->key_len, cipher_parms->iv_buf,
  1069. cipher_parms->iv_len);
  1070. }
  1071. ctrl3 = le64_to_cpu(fmd->ctrl3);
  1072. data_size &= SPU2_PL_LEN;
  1073. ctrl3 |= data_size;
  1074. fmd->ctrl3 = cpu_to_le64(ctrl3);
  1075. packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
  1076. }
  1077. /**
  1078. * spu2_request_pad() - Create pad bytes at the end of the data.
  1079. * @pad_start: Start of buffer where pad bytes are to be written
  1080. * @gcm_padding: Length of GCM padding, in bytes
  1081. * @hash_pad_len: Number of bytes of padding extend data to full block
  1082. * @auth_alg: Authentication algorithm
  1083. * @auth_mode: Authentication mode
  1084. * @total_sent: Length inserted at end of hash pad
  1085. * @status_padding: Number of bytes of padding to align STATUS word
  1086. *
  1087. * There may be three forms of pad:
  1088. * 1. GCM pad - for GCM mode ciphers, pad to 16-byte alignment
  1089. * 2. hash pad - pad to a block length, with 0x80 data terminator and
  1090. * size at the end
  1091. * 3. STAT pad - to ensure the STAT field is 4-byte aligned
  1092. */
  1093. void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
  1094. enum hash_alg auth_alg, enum hash_mode auth_mode,
  1095. unsigned int total_sent, u32 status_padding)
  1096. {
  1097. u8 *ptr = pad_start;
  1098. /* fix data alignent for GCM */
  1099. if (gcm_padding > 0) {
  1100. flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
  1101. gcm_padding);
  1102. memset(ptr, 0, gcm_padding);
  1103. ptr += gcm_padding;
  1104. }
  1105. if (hash_pad_len > 0) {
  1106. /* clear the padding section */
  1107. memset(ptr, 0, hash_pad_len);
  1108. /* terminate the data */
  1109. *ptr = 0x80;
  1110. ptr += (hash_pad_len - sizeof(u64));
  1111. /* add the size at the end as required per alg */
  1112. if (auth_alg == HASH_ALG_MD5)
  1113. *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
  1114. else /* SHA1, SHA2-224, SHA2-256 */
  1115. *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
  1116. ptr += sizeof(u64);
  1117. }
  1118. /* pad to a 4byte alignment for STAT */
  1119. if (status_padding > 0) {
  1120. flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
  1121. status_padding);
  1122. memset(ptr, 0, status_padding);
  1123. ptr += status_padding;
  1124. }
  1125. }
  1126. /**
  1127. * spu2_xts_tweak_in_payload() - Indicate that SPU2 does NOT place the XTS
  1128. * tweak field in the packet payload (it uses IV instead)
  1129. *
  1130. * Return: 0
  1131. */
  1132. u8 spu2_xts_tweak_in_payload(void)
  1133. {
  1134. return 0;
  1135. }
  1136. /**
  1137. * spu2_tx_status_len() - Return the length of the STATUS field in a SPU
  1138. * response message.
  1139. *
  1140. * Return: Length of STATUS field in bytes.
  1141. */
  1142. u8 spu2_tx_status_len(void)
  1143. {
  1144. return SPU2_TX_STATUS_LEN;
  1145. }
  1146. /**
  1147. * spu2_rx_status_len() - Return the length of the STATUS field in a SPU
  1148. * response message.
  1149. *
  1150. * Return: Length of STATUS field in bytes.
  1151. */
  1152. u8 spu2_rx_status_len(void)
  1153. {
  1154. return SPU2_RX_STATUS_LEN;
  1155. }
  1156. /**
  1157. * spu2_status_process() - Process the status from a SPU response message.
  1158. * @statp: start of STATUS word
  1159. *
  1160. * Return: 0 - if status is good and response should be processed
  1161. * !0 - status indicates an error and response is invalid
  1162. */
  1163. int spu2_status_process(u8 *statp)
  1164. {
  1165. /* SPU2 status is 2 bytes by default - SPU_RX_STATUS_LEN */
  1166. u16 status = le16_to_cpu(*(__le16 *)statp);
  1167. if (status == 0)
  1168. return 0;
  1169. flow_log("rx status is %#x\n", status);
  1170. if (status == SPU2_INVALID_ICV)
  1171. return SPU_INVALID_ICV;
  1172. return -EBADMSG;
  1173. }
  1174. /**
  1175. * spu2_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
  1176. *
  1177. * @digestsize: Digest size of this request
  1178. * @cipher_parms: (pointer to) cipher parmaeters, includes IV buf & IV len
  1179. * @assoclen: Length of AAD data
  1180. * @chunksize: length of input data to be sent in this req
  1181. * @is_encrypt: true if this is an output/encrypt operation
  1182. * @is_esp: true if this is an ESP / RFC4309 operation
  1183. *
  1184. */
  1185. void spu2_ccm_update_iv(unsigned int digestsize,
  1186. struct spu_cipher_parms *cipher_parms,
  1187. unsigned int assoclen, unsigned int chunksize,
  1188. bool is_encrypt, bool is_esp)
  1189. {
  1190. int L; /* size of length field, in bytes */
  1191. /*
  1192. * In RFC4309 mode, L is fixed at 4 bytes; otherwise, IV from
  1193. * testmgr contains (L-1) in bottom 3 bits of first byte,
  1194. * per RFC 3610.
  1195. */
  1196. if (is_esp)
  1197. L = CCM_ESP_L_VALUE;
  1198. else
  1199. L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
  1200. CCM_B0_L_PRIME_SHIFT) + 1;
  1201. /* SPU2 doesn't want these length bytes nor the first byte... */
  1202. cipher_parms->iv_len -= (1 + L);
  1203. memmove(cipher_parms->iv_buf, &cipher_parms->iv_buf[1],
  1204. cipher_parms->iv_len);
  1205. }
  1206. /**
  1207. * spu2_wordalign_padlen() - SPU2 does not require padding.
  1208. * @data_size: length of data field in bytes
  1209. *
  1210. * Return: length of status field padding, in bytes (always 0 on SPU2)
  1211. */
  1212. u32 spu2_wordalign_padlen(u32 data_size)
  1213. {
  1214. return 0;
  1215. }