// SPDX-License-Identifier: GPL-2.0-only
/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include "linux/platform_data/qcom_crypto_device.h"
#include "linux/qcedev.h"
#include <linux/interconnect.h>
#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"
#include "compat_qcedev.h"
#include <linux/compat.h>

#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)
/* Max wait time once a crypto request is done */
#define MAX_CRYPTO_WAIT_TIME 1500
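
/* standard initialization vector for SHA-1, source: FIPS 180-2 */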
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

#define QCEDEV_CTX_KEY_MASK	0x000000ff
#define QCEDEV_CTX_USE_HW_KEY	0x00000001
#define QCEDEV_CTX_USE_PIPE_KEY	0x00000002

// Key timer expiry for pipes 1-15 (Status3)
#define PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK	0x000000FF
// Key timer expiry for pipes 16-19 (Status6)
#define PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK	0x00000003
// Key pause for pipes 1-15 (Status3)
#define PIPE_KEY_PAUSE_STATUS3_MASK	0xFF0000
// Key pause for pipes 16-19 (Status6)
#define PIPE_KEY_PAUSE_STATUS6_MASK	0x30000

#define QCEDEV_STATUS1_ERR_INTR_MASK	0x10

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static dev_t qcedev_device_no;
static struct class *driver_class;
static struct device *class_dev;

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);
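
/*
 * Enable or disable the CE clocks and the interconnect bandwidth vote in
 * the order the hardware requires: when ce_support.req_bw_before_clk is
 * set, the bandwidth vote is raised before the clocks come on (and the
 * clocks are dropped before the vote on the way down); otherwise the
 * order is reversed. Each step is rolled back if the following one fails.
 */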
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path,
				CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = icc_set_bw(podev->icc_path,
				CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path, 0, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path,
					CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
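
/*
 * Reference-counted high-bandwidth vote: the first requester turns the
 * clocks and bandwidth on, the last one turns them off; calls in between
 * only adjust high_bw_req_count under the qcedev_sent_bw_req mutex.
 */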
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
				bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}

exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}
#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev,
			int *current_req_info);
static int start_offload_cipher_req(struct qcedev_control *podev,
			int *current_req_info);
static int start_sha_req(struct qcedev_control *podev,
			int *current_req_info);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_qcedev_ioctl,
#endif
	.open = qcedev_open,
	.release = qcedev_release,
};
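
/*
 * Userspace drives this driver through ioctls on the qcedev character
 * device. A minimal sketch of the flow, assuming the uapi definitions
 * from linux/qcedev.h (the device node name depends on the target):
 *
 *	int fd = open("/dev/qce", O_RDWR);
 *	struct qcedev_cipher_op_req req = { ... };
 *	ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
 *
 * qcedev_ioctl() validates the request and funnels it into submit_req()
 * below.
 */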
static struct qcedev_control qce_dev[] = {
	{
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;
static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].minor == n)
			return &qce_dev[i];
	}
	return NULL;
}
static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
	}

	if (qcedev_unmap_all_buffers(handle))
		pr_err("%s: failed to unmap all ion buffers\n", __func__);

	kfree_sensitive(handle);
	file->private_data = NULL;
	return 0;
}
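
/*
 * Tasklet handler run after the CE back end signals completion: it
 * completes the finished request and, while ready_commands is non-empty,
 * pulls the next qcedev_async_req off the queue and starts it. A request
 * that fails to start is completed immediately and the loop tries the
 * next one.
 */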
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;
	int current_req_info = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev, &current_req_info);
		else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
			ret = start_offload_cipher_req(podev, &current_req_info);
		else
			ret = start_sha_req(podev, &current_req_info);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}
}
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;
	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}

void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);

	tasklet_schedule(&podev->done_tasklet);
}
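
/*
 * Translate the active qcedev cipher request into a qce_req and hand it
 * to the QCE back end. AES-CTR is always programmed as an encrypt
 * operation, since CTR encryption and decryption are the same transform.
 * A zero-length key is accepted only for the explicit no-key ops or,
 * when hardware key support is present, for an all-zero key buffer.
 */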
static int start_cipher_req(struct qcedev_control *podev,
			int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;
	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
				(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	creq.offload_op = 0;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
	*current_req_info = creq.current_req_info;
unsupported:
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
			unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
			qcedev_areq->offload_cipher_op_req.ivlen);

	tasklet_schedule(&podev->done_tasklet);
}
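
/*
 * Offload (CPB) variant of start_cipher_req(): only AES in CBC or CTR
 * mode is accepted, the direction is derived from the HLOS/CPB transfer
 * type, and no key material is programmed because offload pipes use
 * pipe keys provisioned out of band.
 */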
static int start_offload_cipher_req(struct qcedev_control *podev,
				int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	u8 patt_sz = 0, proc_data_sz = 0;
	int ret = 0;

	/* Start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;

	switch (qcedev_areq->offload_cipher_op_req.alg) {
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->offload_cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	default:
		return -EINVAL;
	}

	if (qcedev_areq->offload_cipher_op_req.is_copy_op) {
		creq.dir = QCE_ENCRYPT;
	} else {
		switch (qcedev_areq->offload_cipher_op_req.op) {
		case QCEDEV_OFFLOAD_HLOS_HLOS:
		case QCEDEV_OFFLOAD_HLOS_CPB:
			creq.dir = QCE_DECRYPT;
			break;
		case QCEDEV_OFFLOAD_CPB_HLOS:
			creq.dir = QCE_ENCRYPT;
			break;
		default:
			return -EINVAL;
		}
	}

	creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
	creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
	creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;

	/* OFFLOAD use cases use PIPE keys so no need to set keys */
	creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
	creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
	creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
	if (qcedev_areq->offload_cipher_op_req.is_copy_op)
		creq.is_copy_op = true;

	creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;

	creq.qce_cb = qcedev_offload_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
	proc_data_sz =
		qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
	creq.is_pattern_valid =
		qcedev_areq->offload_cipher_op_req.is_pattern_valid;
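	/*
	 * Pack the pattern descriptor into one word: bit 0 enables
	 * pattern processing, (patt_sz - 1) is placed at bit 4,
	 * (proc_data_sz - 1) at bit 8, and the pattern offset at bit 12.
	 */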
	if (creq.is_pattern_valid) {
		creq.pattern_info = 0x1;
		if (patt_sz)
			creq.pattern_info |= (patt_sz - 1) << 4;
		if (proc_data_sz)
			creq.pattern_info |= (proc_data_sz - 1) << 8;
		creq.pattern_info |=
			qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
	}

	creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;
	ret = qce_ablk_cipher_req(podev->qce, &creq);

	*current_req_info = creq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
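
/*
 * Build a qce_sha_req for the active hash request. For the plain SHA
 * algorithms the running digest, byte counters and block flags are
 * carried across calls in handle->sha_ctxt; HMAC falls back to plain
 * SHA (with a software ipad/opad, see qcedev_hmac_update_iokey() below)
 * when the CE core lacks native sha_hmac support.
 */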
static int start_sha_req(struct qcedev_control *podev,
			int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;
	sreq.qce_cb = qcedev_sha_req_cb;

	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);
	*current_req_info = sreq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
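
/*
 * Read back the six CE status registers and map them onto the offload
 * error codes reported to userspace: key timer expiry and key pause are
 * checked per pipe group (Status3 covers pipes 1-15, Status6 pipes
 * 16-19), and Status1's error-interrupt bit is reported as a generic
 * error.
 */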
static void qcedev_check_crypto_status(
			struct qcedev_async_req *qcedev_areq, void *handle,
			bool print_err)
{
	unsigned int s1, s2, s3, s4, s5, s6;

	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
	qce_get_crypto_status(handle, &s1, &s2, &s3, &s4, &s5, &s6);

	if (print_err) {
		pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
	}

	// Check for key timer expiry
	if ((s6 & PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK) ||
		(s3 & PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK)) {
		pr_info("%s: crypto timer expired\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR;
		return;
	}

	// Check for key pause
	if ((s6 & PIPE_KEY_PAUSE_STATUS6_MASK) ||
		(s3 & PIPE_KEY_PAUSE_STATUS3_MASK)) {
		pr_info("%s: crypto key paused\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_PAUSE_ERROR;
		return;
	}

	// Check for generic error
	if (s1 & QCEDEV_STATUS1_ERR_INTR_MASK) {
		pr_err("%s: generic crypto error\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_GENERIC_ERROR;
		return;
	}
}
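
/*
 * Common submission path for cipher, offload-cipher and SHA requests:
 * the request either becomes the active command and is started right
 * away, or is queued on ready_commands for req_done() to pick up. The
 * caller then sleeps on the request's completion; if the wait times out,
 * qce_manage_timeout() performs the housekeeping the completion callback
 * would normally have done.
 */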
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;
	int current_req_info = 0;
	int wait = 0;
	bool print_sts = false;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
	if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR)
		return 0;

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev, &current_req_info);
		else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
			ret = start_offload_cipher_req(podev, &current_req_info);
		else
			ret = start_sha_req(podev, &current_req_info);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait = wait_for_completion_timeout(&qcedev_areq->complete,
				msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME));

	if (!wait) {
		/*
		 * The wait timed out, so the callback routine was never
		 * exercised. The callback sequence does some housekeeping
		 * that would otherwise be missed, hence the call into qce
		 * here to perform it.
		 */
		pr_err("%s: wait timed out, req info = %d\n", __func__,
			current_req_info);
		print_sts = true;
		qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
		qce_manage_timeout(podev->qce, current_req_info);
		if (qcedev_areq->offload_cipher_op_req.err !=
						QCEDEV_OFFLOAD_NO_ERROR)
			return 0;
	}

	if (ret)
		qcedev_areq->err = -EIO;

	qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
	if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR)
		return 0;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
		// Do nothing
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}
static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha256_uint8[0],
			SHA256_DIGEST_SIZE);
		sha_ctxt->diglen = SHA256_DIGEST_SIZE;
	}

	sha_ctxt->init_done = true;
	return 0;
}
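
/*
 * Hash one chunk of at most QCE_MAX_OPER_DATA bytes. Data is copied out
 * of userspace into a cache-line-aligned bounce buffer; anything left
 * over after the last full SHA block is held back in
 * sha_ctxt.trailing_buf so the hardware is only ever fed whole blocks.
 * Totals at or below one block are just accumulated, not submitted.
 */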
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;
		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src = qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
						CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
							t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		memset(k_buf_src, 0, ksize((void *)k_buf_src));
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, ksize((void *)k_buf_src));
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src,
					total - trailing_buf_len);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(k_buf_src);
	return err;
}
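
/*
 * Split an update larger than QCE_MAX_OPER_DATA into
 * QCE_MAX_OPER_DATA-sized transfers. The original sha_op_req is saved
 * and restored around the loop because the per-iteration bookkeeping
 * rewrites the entry list in place.
 */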
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
					__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
						sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		memset(saved_req, 0, ksize((void *)saved_req));
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
						CACHE_LINE_SIZE);
	memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;

	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) &&
		(qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key length\n", __func__);
		return -EINVAL;
	}

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;

	if (total > U32_MAX - CACHE_LINE_SIZE * 2)
		return -EINVAL;

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, ksize((void *)k_buf_src));
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(k_buf_src);
	return err;
}
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);

		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;

		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}
static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		sha_digest_size = SHA256_DIGEST_SIZE;
		sha_block_size = SHA256_BLOCK_SIZE;
	}

	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;
	memset(k_src, 0, ksize((void *)k_src));
	kfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
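
/*
 * XOR the stored authentication key with the HMAC inner (ipad, 0x36) or
 * outer (opad, 0x5c) padding constant from RFC 2104 and stage the result
 * in trailing_buf, so the next hash pass consumes K ^ ipad or K ^ opad
 * as its first block.
 */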
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
			(handle->sha_ctxt.authkey[i] ^ constant);
	handle->sha_ctxt.trailing_buf_len = sha_block_size;

	return 0;
}
static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}
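
/*
 * Run one bounce-buffered cipher transfer: gather the user source
 * entries into k_align_src, encrypt/decrypt in place through a single
 * scatterlist entry, then scatter the result back across the user
 * destination entries, advancing *di so a caller issuing multiple
 * transfers keeps filling destinations where the previous one stopped.
 * For CTR mode, byteoffset shifts the data within the first counter
 * block.
 */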
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src = areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_init_one(areq->cipher_req.creq.src,
			k_align_dst,
			areq->cipher_op_req.data_len);

	areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.iv = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}
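
/*
 * Top-level virtual-address cipher path: requests larger than
 * QCE_MAX_OPER_DATA are carved into bounce-buffer-sized transfers using
 * the same save/rewrite/restore scheme as qcedev_sha_update(), with the
 * destination index threaded through the iterations via 'di'.
 */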
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;
	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmemdup(creq, sizeof(struct qcedev_cipher_op_req),
				GFP_KERNEL);
	if (saved_req == NULL) {
		memset(k_buf_src, 0, ksize((void *)k_buf_src));
		kfree(k_buf_src);
		return -ENOMEM;
	}

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0,
						ksize((void *)saved_req));
					memset(k_buf_src, 0,
						ksize((void *)k_buf_src));
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;
			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0,
						ksize((void *)saved_req));
					memset(k_buf_src, 0,
						ksize((void *)k_buf_src));
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
								k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;
	memset(saved_req, 0, ksize((void *)saved_req));
	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(saved_req);
	kfree(k_buf_src);
	return err;
}
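
/*
 * qcedev_smmu_ablk_offload_cipher() - offload cipher path. Buffers here are
 * presumably already mapped for the CE via the MAP_BUF ioctl: the vbuf vaddr
 * fields are written straight into single-entry scatterlist dma_address
 * slots instead of being copied through a kernel bounce buffer. Each entry
 * is processed in chunks of at most MAX_CEHW_REQ_TRANSFER_SIZE.
 */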
static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	size_t byteoffset = 0;
	size_t transfer_data_len = 0;
	size_t pending_data_len = 0;
	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
	uint8_t *user_src = NULL;
	uint8_t *user_dst = NULL;
	struct scatterlist sg_src;
	struct scatterlist sg_dst;

	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->offload_cipher_op_req.byteoffset;

	/*
	 * areq has two components:
	 * a) Request that comes from userspace i.e. offload_cipher_op_req
	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
	 * skcipher has sglist pointers src and dest that would carry
	 * data to/from CE.
	 */
	areq->cipher_req.creq.src = &sg_src;
	areq->cipher_req.creq.dst = &sg_dst;
	sg_init_table(&sg_src, 1);
	sg_init_table(&sg_dst, 1);

	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
		transfer_data_len = 0;
		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
		user_src += byteoffset;

		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
		user_dst += byteoffset;

		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;

		while (pending_data_len) {
			transfer_data_len = min(max_data_xfer,
						pending_data_len);
			sg_src.dma_address = (dma_addr_t)user_src;
			sg_dst.dma_address = (dma_addr_t)user_dst;
			areq->cipher_req.creq.cryptlen = transfer_data_len;

			sg_src.length = transfer_data_len;
			sg_dst.length = transfer_data_len;

			err = submit_req(areq, handle);
			if (err) {
				pr_err("%s: Error processing req, err = %d\n",
						__func__, err);
				goto exit;
			}
			/* update data len to be processed */
			pending_data_len -= transfer_data_len;
			user_src += transfer_data_len;
			user_dst += transfer_data_len;
		}
	}
exit:
	return err;
}
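
/*
 * qcedev_check_cipher_key() - validate the key portion of a cipher request:
 * an all-zero, zero-length key is only legal for the *_NO_KEY ops or when
 * the target supports HW keys, and software keys must match a supported AES
 * key size (doubled for XTS).
 */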
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n",
								__func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen !=
							QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}
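
/*
 * qcedev_check_cipher_params() - sanity-check a userspace cipher request
 * before any data is touched: entry counts, algorithm/mode ranges, CTR
 * byteoffset rules, IV and key length bounds, and that the src/dst vbuf
 * lengths sum exactly to data_len without integer overflow.
 */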
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
		(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
				(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
						req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
				(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
				(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n",
								__func__);
			goto error;
		}
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
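
/*
 * qcedev_check_sha_params() - equivalent validation for hash/HMAC/CMAC
 * requests: algorithm range, auth key presence and length, and that the
 * data[] entry lengths sum to data_len without overflow.
 */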
static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
				(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
				__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}
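
/*
 * qcedev_check_offload_cipher_key() - offload requests only accept AES-128
 * or AES-256 software keys; AES-192 and zero-length (HW-key) requests are
 * rejected.
 */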
static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
					struct qcedev_control *podev)
{
	if (req->encklen == 0)
		return -EINVAL;

	/* AES-192 is not a valid option for OFFLOAD use case */
	if ((req->encklen != QCEDEV_AES_KEY_128) &&
			(req->encklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key size %d\n",
					__func__, req->encklen);
		goto error;
	}

	return 0;
error:
	return -EINVAL;
}
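
/*
 * qcedev_check_offload_cipher_params() - mirror of
 * qcedev_check_cipher_params() for the offload path, with an additional
 * block_offset bound and an AES-only (ECB/CBC/CTR) algorithm restriction.
 */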
static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
					struct qcedev_control *podev)
{
	uint32_t total = 0;
	int i = 0;

	if ((req->entries == 0) || (req->data_len == 0) ||
		(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}

	if ((req->alg != QCEDEV_ALG_AES) ||
		(req->mode > QCEDEV_AES_MODE_CTR)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
					(uint32_t)req->alg);
		goto error;
	}

	if (qcedev_check_offload_cipher_key(req, podev))
		goto error;

	if (req->block_offset >= AES_CE_BLOCK_SIZE)
		goto error;

	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
						__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Int overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
						req->encklen);
		goto error;
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Int overflow on total req dst vbuf len\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Int overflow on total req src vbuf len\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}

	return 0;
error:
	return -EINVAL;
}
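
/*
 * qcedev_ioctl() - single entry point for all userspace requests. Every
 * command copies its argument struct in, validates it with the checkers
 * above, runs the operation, and copies the (possibly updated) struct back
 * out. A high-bandwidth bus vote is held for the duration of any crypto
 * command; the map/unmap buffer requests are exempt from the vote.
 */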
long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;
	struct qcedev_stat *pstat;

	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
	if (!qcedev_areq)
		return -ENOMEM;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq->handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
		err = -ENOENT;
		goto exit_free_qcedev_areq;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

	init_completion(&qcedev_areq->complete);
	pstat = &_qcedev_stat;

	if (cmd != QCEDEV_IOCTL_MAP_BUF_REQ &&
		cmd != QCEDEV_IOCTL_UNMAP_BUF_REQ)
		qcedev_ce_high_bw_req(podev, true);

	switch (cmd) {
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (copy_from_user(&qcedev_areq->cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
				podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;
		if (copy_to_user((void __user *)arg,
					&qcedev_areq->cipher_op_req,
					sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
		if (qcedev_check_offload_cipher_params(
				&qcedev_areq->offload_cipher_op_req, podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;

		if (copy_to_user((void __user *)arg,
				&qcedev_areq->offload_cipher_op_req,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = true;
		}
		break;

	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac) {
			err = -ENOTTY;
			goto exit_free_qcedev_areq;
		}
		/* Fall-through */
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		} else {
			if (!handle->sha_ctxt.init_done) {
				pr_err("%s Init was not called\n", __func__);
				mutex_unlock(&hash_access_lock);
				err = -EINVAL;
				goto exit_free_qcedev_areq;
			}
			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		}

		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:
		if (!handle->sha_ctxt.init_done) {
			pr_err("%s Init was not called\n", __func__);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = false;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;

	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				(void __user *)arg, sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
					map_buf.fd[i],
					map_buf.fd_offset[i],
					map_buf.fd_size[i],
					&vaddr);
			if (err) {
				pr_err(
					"%s: err: failed to map fd(%d) - %d\n",
					__func__, map_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx, fd = %d\n",
				__func__, vaddr, map_buf.fd[i]);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
		}

	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				(void __user *)arg, sizeof(unmap_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
					unmap_buf.fd[i]);
			if (err) {
				pr_err(
					"%s: err: failed to unmap fd(%d) - %d\n",
					__func__,
					unmap_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
		}
		break;
		}

	default:
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

exit_free_qcedev_areq:
	if (cmd != QCEDEV_IOCTL_MAP_BUF_REQ &&
		cmd != QCEDEV_IOCTL_UNMAP_BUF_REQ && podev != NULL)
		qcedev_ce_high_bw_req(podev, false);
	kfree(qcedev_areq);
	return err;
}
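
/*
 * qcedev_probe_device() - create the qcedev character device and class,
 * vote for bus bandwidth long enough to bring the CE up via qce_open(),
 * then drop back to the low-bandwidth vote and register the ION memory
 * client and any child nodes (context banks) from the device tree.
 */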
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QCEDEV_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL,
			QCEDEV_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&podev->cdev, &qcedev_fops);
	podev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	podev->minor = 0;

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->icc_path = of_icc_get(&pdev->dev, "data_path");
	if (IS_ERR(podev->icc_path)) {
		rc = PTR_ERR(podev->icc_path);
		pr_err("%s Failed to get icc path with error %d\n",
			__func__, rc);
		goto exit_del_cdev;
	}

	rc = icc_set_bw(podev->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
	if (rc) {
		pr_err("%s Unable to set high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}

	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}

	rc = icc_set_bw(podev->icc_path, 0, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
						podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared =
					platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
					platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
					platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto exit_mem_new_client;
	}

	return 0;

exit_mem_new_client:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;
exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	icc_set_bw(podev->icc_path, 0, 0);
exit_unregister_bus_scale:
	if (podev->icc_path)
		icc_put(podev->icc_path);
exit_del_cdev:
	cdev_del(&podev->cdev);
exit_destroy_device:
	device_destroy(driver_class, qcedev_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qcedev_device_no, 1);

	podev->icc_path = NULL;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;
	return rc;
}
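
/*
 * qcedev_probe() - dispatch on the compatible string: the main device node
 * probes the full driver, while "qcom,qcedev,context-bank" child nodes only
 * register a context bank.
 */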
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}
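
/*
 * qcedev_remove() - tear down in roughly the reverse order of probe: close
 * the CE, release the interconnect path, kill the done tasklet, and destroy
 * the character device, class, and chrdev region.
 */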
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;
	if (podev->qce)
		qce_close(podev->qce);
	if (podev->icc_path)
		icc_put(podev->icc_path);
	tasklet_kill(&podev->done_tasklet);

	cdev_del(&podev->cdev);
	device_destroy(driver_class, qcedev_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(qcedev_device_no, 1);
	return 0;
}
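
/*
 * Suspend/resume only matter when a high-bandwidth vote is outstanding:
 * qcedev_suspend() drops clocks/bandwidth and qcedev_resume() restores
 * them, both under qcedev_sent_bw_req so they cannot race with the vote
 * taken in the ioctl path.
 */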
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.of_match_table = qcedev_match,
	},
};
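
/*
 * debugfs statistics: _disp_stats() formats the per-driver success/fail
 * counters into _debug_read_buf; reads return that text and any write
 * clears the counters.
 */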
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
				id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation success : %d\n",
					pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation fail : %d\n",
					pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation success : %d\n",
					pstat->qcedev_dec_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation fail : %d\n",
					pstat->qcedev_dec_fail);

	return len;
}

static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
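
/*
 * _qcedev_debug_init() - create the "qcedev" debugfs directory with a
 * single "stats-1" file wired to the ops above; on failure the whole
 * directory is removed again.
 */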
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
				&_debug_qcedev, &_debug_stats_ops);
	if (IS_ERR(dent)) {
		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
				PTR_ERR(dent));
		rc = PTR_ERR(dent);
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

static int qcedev_init(void)
{
	_qcedev_debug_init();
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");

module_init(qcedev_init);
module_exit(qcedev_exit);