qcedev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include "linux/platform_data/qcom_crypto_device.h"
#include "linux/qcedev.h"
#include <linux/interconnect.h>
#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"
#include "compat_qcedev.h"
#include <linux/compat.h>

#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)

/*
 * Max wait time once a crypto request is done.
 * Assuming 5ms per crypto operation, this is calculated for
 * the scenario of having 3 offload reqs + 1 tz req + buffer.
 */
#define MAX_CRYPTO_WAIT_TIME 25

/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

#define QCEDEV_CTX_KEY_MASK	0x000000ff
#define QCEDEV_CTX_USE_HW_KEY	0x00000001
#define QCEDEV_CTX_USE_PIPE_KEY	0x00000002

// Key timer expiry for pipes 1-15 (Status3)
#define PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK	0x000000FF
// Key timer expiry for pipes 16-19 (Status6)
#define PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK	0x00000003
// Key pause for pipes 1-15 (Status3)
#define PIPE_KEY_PAUSE_STATUS3_MASK	0xFF0000
// Key pause for pipes 16-19 (Status6)
#define PIPE_KEY_PAUSE_STATUS6_MASK	0x30000

#define QCEDEV_STATUS1_ERR_INTR_MASK	0x10

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static dev_t qcedev_device_no;
static struct class *driver_class;
static struct device *class_dev;

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);
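
/*
 * Enable or disable the CE clocks and the interconnect bandwidth vote
 * as a pair. The ordering depends on ce_support.req_bw_before_clk:
 * some targets must vote bandwidth before enabling clocks (and disable
 * clocks before dropping the vote). On failure, the step that already
 * succeeded is rolled back so clock and bandwidth state stay in sync.
 */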
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path,
				CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = icc_set_bw(podev->icc_path,
				CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path, 0, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path,
					CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
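
/*
 * Reference-counted high-bandwidth vote: the first request turns the
 * clocks and bandwidth on, the last release turns them off. Serialized
 * by qcedev_sent_bw_req.
 */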
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
				bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}
exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}
#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev,
			int *current_req_info);
static int start_offload_cipher_req(struct qcedev_control *podev,
			int *current_req_info);
static int start_sha_req(struct qcedev_control *podev,
			int *current_req_info);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_qcedev_ioctl,
#endif
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;

static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;

	qcedev_ce_high_bw_req(podev, true);

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
	}

	qcedev_ce_high_bw_req(podev, false);
	if (qcedev_unmap_all_buffers(handle))
		pr_err("%s: failed to unmap all ion buffers\n", __func__);

	kfree_sensitive(handle);
	file->private_data = NULL;
	return 0;
}
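
/*
 * Tasklet handler: completes the request that just finished and, while
 * there are queued requests on ready_commands, pops the next one and
 * starts it on the engine. A request whose start routine fails is
 * completed immediately (its err was set by the start routine) and the
 * loop moves on.
 */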
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;
	int current_req_info = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev, &current_req_info);
		else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
			ret = start_offload_cipher_req(podev, &current_req_info);
		else
			ret = start_sha_req(podev, &current_req_info);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}
}
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;
	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;
	if (!pdev)
		return;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}

void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	if (!podev)
		return;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}
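
/*
 * Translate the userspace cipher request in podev->active_command into
 * a qce_req and hand it to the engine. An all-zero key with a
 * non-NO_KEY op is accepted only when the platform supports a hardware
 * key. Called with podev->lock held.
 */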
static int start_cipher_req(struct qcedev_control *podev,
			int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;
	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;
	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
					(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	creq.offload_op = 0;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
	*current_req_info = creq.current_req_info;
unsupported:
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
			unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	if (!podev)
		return;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
			qcedev_areq->offload_cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}
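
/*
 * Build a qce_req for an offload (content-protection) cipher
 * operation. Offload pipes use pipe keys, so no key material is passed
 * here; the direction is derived from the pipe pair (HLOS/CPB) unless
 * the request is a plain copy, and any valid pattern description is
 * packed into creq.pattern_info.
 */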
static int start_offload_cipher_req(struct qcedev_control *podev,
				int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	u8 patt_sz = 0, proc_data_sz = 0;
	int ret = 0;

	memset(&creq, 0, sizeof(creq));
	/* Start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;

	switch (qcedev_areq->offload_cipher_op_req.alg) {
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->offload_cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	default:
		return -EINVAL;
	}

	if (qcedev_areq->offload_cipher_op_req.is_copy_op) {
		creq.dir = QCE_ENCRYPT;
	} else {
		switch (qcedev_areq->offload_cipher_op_req.op) {
		case QCEDEV_OFFLOAD_HLOS_HLOS:
		case QCEDEV_OFFLOAD_HLOS_CPB:
			creq.dir = QCE_DECRYPT;
			break;
		case QCEDEV_OFFLOAD_CPB_HLOS:
			creq.dir = QCE_ENCRYPT;
			break;
		default:
			return -EINVAL;
		}
	}

	creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
	creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
	creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;

	/* OFFLOAD use cases use PIPE keys so no need to set keys */
	creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
	creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
	creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
	if (qcedev_areq->offload_cipher_op_req.is_copy_op)
		creq.is_copy_op = true;

	creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;

	creq.qce_cb = qcedev_offload_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
	proc_data_sz =
		qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
	creq.is_pattern_valid =
		qcedev_areq->offload_cipher_op_req.is_pattern_valid;
	if (creq.is_pattern_valid) {
		creq.pattern_info = 0x1;
		if (patt_sz)
			creq.pattern_info |= (patt_sz - 1) << 4;
		if (proc_data_sz)
			creq.pattern_info |= (proc_data_sz - 1) << 8;
		creq.pattern_info |=
			qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
	}
	creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;

	ret = qce_ablk_cipher_req(podev->qce, &creq);

	*current_req_info = creq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
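
/*
 * Start a SHA/HMAC/CMAC request. For the hash algorithms the
 * intermediate digest and byte counters kept in handle->sha_ctxt are
 * loaded into the request, so multi-update operations resume where the
 * previous block left off. Targets without engine HMAC support fall
 * back to a plain hash here; the ipad/opad steps are then done in
 * software (see qcedev_hmac_init/qcedev_hmac_final below).
 */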
static int start_sha_req(struct qcedev_control *podev,
			int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);

	*current_req_info = sreq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
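
/*
 * Read the six engine status registers and classify any failure into
 * the offload error reported back to userspace: pipe-key timer expiry,
 * pipe-key pause, or a generic error interrupt. Called when a request
 * times out, optionally dumping the raw status words.
 */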
static void qcedev_check_crypto_status(
			struct qcedev_async_req *qcedev_areq, void *handle,
			bool print_err)
{
	unsigned int s1, s2, s3, s4, s5, s6;

	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
	qce_get_crypto_status(handle, &s1, &s2, &s3, &s4, &s5, &s6);

	if (print_err) {
		pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
	}

	// Check for key timer expiry
	if ((s6 & PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK) ||
		(s3 & PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK)) {
		pr_info("%s: crypto timer expired\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR;
		return;
	}

	// Check for key pause
	if ((s6 & PIPE_KEY_PAUSE_STATUS6_MASK) ||
		(s3 & PIPE_KEY_PAUSE_STATUS3_MASK)) {
		pr_info("%s: crypto key paused\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_PAUSE_ERROR;
		return;
	}

	// Check for generic error
	if (s1 & QCEDEV_STATUS1_ERR_INTR_MASK) {
		pr_err("%s: generic crypto error\n", __func__);
		pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
			s1, s2, s3, s4, s5, s6);
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_GENERIC_ERROR;
		return;
	}
}
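
/*
 * Submit a request and wait for its completion. If the engine is idle
 * the request is started directly, otherwise it is queued on
 * ready_commands for req_done() to pick up. The wait is bounded by
 * MAX_CRYPTO_WAIT_TIME; on timeout the engine status is inspected and
 * qce_manage_timeout() performs the housekeeping the missed callback
 * would have done. Per-operation success/failure stats are updated
 * before returning.
 */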
static int submit_req(struct qcedev_async_req *qcedev_areq,
			struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;
	int current_req_info = 0;
	int wait = MAX_CRYPTO_WAIT_TIME;
	bool print_sts = false;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev, &current_req_info);
		else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
			ret = start_offload_cipher_req(podev, &current_req_info);
		else
			ret = start_sha_req(podev, &current_req_info);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait = wait_for_completion_timeout(&qcedev_areq->complete,
				msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME));

	if (!wait) {
		/*
		 * This means wait timed out, and the callback routine was not
		 * exercised. The callback sequence does some housekeeping which
		 * would be missed here, hence having a call to qce here to do
		 * that.
		 */
		pr_err("%s: wait timed out, req info = %d\n", __func__,
			current_req_info);
		print_sts = true;
		qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
		qce_manage_timeout(podev->qce, current_req_info);
		if (qcedev_areq->offload_cipher_op_req.err !=
						QCEDEV_OFFLOAD_NO_ERROR)
			return 0;
	}

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
		// Do nothing
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}
static int qcedev_sha_init(struct qcedev_async_req *areq,
			struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	sha_ctxt->init_done = true;
	return 0;
}
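
/*
 * Hash at most QCE_MAX_OPER_DATA bytes of user data. Bytes left over
 * from the previous update (trailing_buf) are prepended, the user
 * buffers are copied into a cache-line-aligned kernel bounce buffer,
 * and only whole SHA blocks are submitted to the engine; the remainder
 * is saved back into trailing_buf for the next update or final.
 */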
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;
		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src = qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
						CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
							t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		memset(k_buf_src, 0, ksize((void *)k_buf_src));
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, ksize((void *)k_buf_src));
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src,
					total - trailing_buf_len);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(k_buf_src);
	return err;
}
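
/*
 * Top-level update: when the request exceeds QCE_MAX_OPER_DATA, the
 * data[] entry list is re-sliced so that each pass through
 * qcedev_sha_update_max_xfer() handles at most QCE_MAX_OPER_DATA
 * bytes; the caller's qcedev_sha_op_req is saved up front and restored
 * once all slices have been processed.
 */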
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
					__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		memset(saved_req, 0, ksize((void *)saved_req));
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
			struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
						CACHE_LINE_SIZE);
	memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;

	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) &&
		(qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key length\n", __func__);
		return -EINVAL;
	}

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;

	if (total > U32_MAX - CACHE_LINE_SIZE * 2)
		return -EINVAL;

	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, ksize((void *)k_buf_src));
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(k_buf_src);
	return err;
}
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);

		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;

		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}
static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;
	memset(k_src, 0, ksize((void *)k_src));
	kfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
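
/*
 * Software HMAC keying for targets without engine HMAC support: XOR
 * the block-sized auth key with the standard ipad (0x36) or opad
 * (0x5c) constant and stage the result in trailing_buf as the first
 * block of the inner or outer hash (HMAC = H(K^opad || H(K^ipad || m))).
 */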
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
			(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
			struct qcedev_handle *handle,
			struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
			struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
			struct qcedev_handle *handle,
			struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
			struct qcedev_handle *handle,
			struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
			struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}
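
/*
 * Cipher at most QCE_MAX_OPER_DATA bytes of virtual-address data:
 * gather the user source buffers into the aligned bounce buffer, run
 * the operation in place, then scatter the result back to the user
 * destination buffers, advancing *di across calls so a destination
 * entry can straddle transfer boundaries. For CTR mode the data is
 * staged at byteoffset into the buffer.
 */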
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src = areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_init_one(areq->cipher_req.creq.src,
			k_align_dst,
			areq->cipher_op_req.data_len);

	areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.iv = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}
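
/*
 * Top-level virtual-buffer cipher path: allocate the bounce buffer
 * and, when the request exceeds QCE_MAX_OPER_DATA, re-slice the source
 * entry list so that each call into qcedev_vbuf_ablk_cipher_max_xfer()
 * stays within one maximum transfer; the caller's request structure is
 * restored from saved_req before returning.
 */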
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;
	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmemdup(creq, sizeof(struct qcedev_cipher_op_req),
				GFP_KERNEL);
	if (saved_req == NULL) {
		memset(k_buf_src, 0, ksize((void *)k_buf_src));
		kfree(k_buf_src);
		return -ENOMEM;
	}

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0,
						ksize((void *)saved_req));
					memset(k_buf_src, 0,
						ksize((void *)k_buf_src));
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;
			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0,
						ksize((void *)saved_req));
					memset(k_buf_src, 0,
						ksize((void *)k_buf_src));
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
								k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	memset(saved_req, 0, ksize((void *)saved_req));
	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(saved_req);
	kfree(k_buf_src);
	return err;
}
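
/*
 * Offload (pre-mapped) cipher path: the src/dst addresses in the
 * request are already device-mapped, so each vbuf entry is wrapped in
 * a single-entry scatterlist and submitted in chunks of at most
 * max_data_xfer bytes, with no bounce-buffer copies.
 */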
static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	size_t byteoffset = 0;
	size_t transfer_data_len = 0;
	size_t pending_data_len = 0;
	size_t max_data_xfer;
	uint8_t *user_src = NULL;
	uint8_t *user_dst = NULL;
	struct scatterlist sg_src;
	struct scatterlist sg_dst;

	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->offload_cipher_op_req.byteoffset;

	/*
	 * Compute the per-chunk limit only after byteoffset is known;
	 * computing it at declaration time would always subtract zero.
	 */
	max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;

	/*
	 * areq has two components:
	 * a) Request that comes from userspace i.e. offload_cipher_op_req
	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
	 * skcipher has sglist pointers src and dest that would carry
	 * data to/from CE.
	 */
	areq->cipher_req.creq.src = &sg_src;
	areq->cipher_req.creq.dst = &sg_dst;
	sg_init_table(&sg_src, 1);
	sg_init_table(&sg_dst, 1);

	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
		transfer_data_len = 0;
		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
		user_src += byteoffset;

		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
		user_dst += byteoffset;

		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;

		while (pending_data_len) {
			transfer_data_len = min(max_data_xfer,
						pending_data_len);
			sg_src.dma_address = (dma_addr_t)user_src;
			sg_dst.dma_address = (dma_addr_t)user_dst;
			areq->cipher_req.creq.cryptlen = transfer_data_len;

			sg_src.length = transfer_data_len;
			sg_dst.length = transfer_data_len;

			err = submit_req(areq, handle);
			if (err) {
				pr_err("%s: Error processing req, err = %d\n",
						__func__, err);
				goto exit;
			}
			/* update data len to be processed */
			pending_data_len -= transfer_data_len;
			user_src += transfer_data_len;
			user_dst += transfer_data_len;
		}
	}
exit:
	return err;
}
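
/*
 * Validate the key of an AES cipher request: a zero-length key is only
 * accepted for the *_NO_KEY ops or when the target supports HW-backed
 * keys (and the key bytes must then all be zero); otherwise the key
 * length must match a supported AES key size for the requested mode.
 */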
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}
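
/*
 * Sanity-check a userspace cipher request before any data is touched:
 * entry count, algorithm/mode, byteoffset (CTR-only, sub-block), IV
 * and key lengths, and that the src and dst buffer lists each sum to
 * exactly data_len without integer overflow.
 */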
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
			(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
				(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
				(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
				(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n",
								__func__);
			goto error;
		}
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}
	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
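
/*
 * Sanity-check a userspace SHA/HMAC/CMAC request: algorithm support,
 * entry count, auth key presence and length, and that the data list
 * sums to exactly data_len without integer overflow.
 */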
static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
				(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
				__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}
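
/*
 * Offload keys must be present and either AES-128 or AES-256 sized;
 * AES-192 is rejected for the offload use case.
 */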
static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
					struct qcedev_control *podev)
{
	if (req->encklen == 0)
		return -EINVAL;

	/* AES-192 is not a valid option for OFFLOAD use case */
	if ((req->encklen != QCEDEV_AES_KEY_128) &&
			(req->encklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key size %d\n",
					__func__, req->encklen);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
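
/*
 * Offload counterpart of qcedev_check_cipher_params(): AES only, mode
 * no higher than CTR, plus the same byteoffset, IV/key length, and
 * src/dst length-sum checks.
 */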
static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
					struct qcedev_control *podev)
{
	uint32_t total = 0;
	int i = 0;

	if ((req->entries == 0) || (req->data_len == 0) ||
			(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}

	if ((req->alg != QCEDEV_ALG_AES) ||
			(req->mode > QCEDEV_AES_MODE_CTR)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
					(uint32_t)req->alg);
		goto error;
	}

	if (qcedev_check_offload_cipher_key(req, podev))
		goto error;

	if (req->block_offset >= AES_CE_BLOCK_SIZE)
		goto error;

	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Int overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
						req->encklen);
		goto error;
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Int overflow on total req dst vbuf len\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Int overflow on total req src vbuf len\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}

	return 0;
error:
	return -EINVAL;
}
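
/*
 * Main ioctl entry point. One qcedev_async_req is allocated per call
 * and freed on every exit path. Cipher and offload ops are validated
 * and dispatched directly; the SHA/CMAC ops serialize on
 * hash_access_lock because they share the per-handle sha_ctxt state.
 *
 * A minimal userspace sketch (hypothetical request values, assuming
 * the uapi definitions from the qcedev header):
 *
 *	struct qcedev_cipher_op_req req = { ... };
 *	int fd = open("/dev/" QCEDEV_DEV, O_RDWR);
 *
 *	if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0)
 *		perror("QCEDEV_IOCTL_ENC_REQ");
 */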
long qcedev_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;
	struct qcedev_stat *pstat;

	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
	if (!qcedev_areq)
		return -ENOMEM;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq->handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
		err = -ENOENT;
		goto exit_free_qcedev_areq;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

	init_completion(&qcedev_areq->complete);
	pstat = &_qcedev_stat;

	switch (cmd) {
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (copy_from_user(&qcedev_areq->cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
				podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;
		if (copy_to_user((void __user *)arg,
				&qcedev_areq->cipher_op_req,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
		if (qcedev_check_offload_cipher_params(
				&qcedev_areq->offload_cipher_op_req, podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;

		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;

		if (copy_to_user((void __user *)arg,
				&qcedev_areq->offload_cipher_op_req,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = true;
		}
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac) {
			err = -ENOTTY;
			goto exit_free_qcedev_areq;
		}
		/* Fall-through */
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		} else {
			if (!handle->sha_ctxt.init_done) {
				pr_err("%s Init was not called\n", __func__);
				mutex_unlock(&hash_access_lock);
				err = -EINVAL;
				goto exit_free_qcedev_areq;
			}
			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		}

		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;
	case QCEDEV_IOCTL_SHA_FINAL_REQ:
		if (!handle->sha_ctxt.init_done) {
			pr_err("%s Init was not called\n", __func__);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = false;
		break;
	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;
	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				(void __user *)arg, sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
					map_buf.fd[i],
					map_buf.fd_offset[i],
					map_buf.fd_size[i],
					&vaddr);
			if (err) {
				pr_err(
					"%s: err: failed to map fd(%d) - %d\n",
					__func__, map_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx, fd = %d\n",
					__func__, vaddr, map_buf.fd[i]);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
		}

	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				(void __user *)arg, sizeof(unmap_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
					unmap_buf.fd[i]);
			if (err) {
				pr_err(
					"%s: err: failed to unmap fd(%d) - %d\n",
					__func__,
					unmap_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
		}
		break;
		}

	default:
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

exit_free_qcedev_areq:
	kfree(qcedev_areq);
	return err;
}
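
/*
 * Device probe: registers the char device, takes a high interconnect
 * bandwidth vote around qce_open(), caches the CE hardware
 * capabilities, creates the ION memory client, and populates child
 * nodes (SMMU context banks). Error paths unwind in reverse order of
 * setup via the chained exit_* labels.
 */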
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QCEDEV_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL,
			QCEDEV_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&podev->cdev, &qcedev_fops);
	podev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	podev->minor = 0;

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->icc_path = of_icc_get(&pdev->dev, "data_path");
	if (IS_ERR(podev->icc_path)) {
		rc = PTR_ERR(podev->icc_path);
		pr_err("%s Failed to get icc path with error %d\n",
			__func__, rc);
		goto exit_del_cdev;
	}

	rc = icc_set_bw(podev->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
	if (rc) {
		pr_err("%s Unable to set high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}

	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}
	rc = icc_set_bw(podev->icc_path, 0, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
						podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		/* rc would otherwise still be 0 here and probe would
		 * report success with no memory client
		 */
		rc = -ENOMEM;
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto exit_mem_new_client;
	}

	return 0;

exit_mem_new_client:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;
exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	icc_set_bw(podev->icc_path, 0, 0);
exit_unregister_bus_scale:
	if (podev->icc_path)
		icc_put(podev->icc_path);
exit_del_cdev:
	cdev_del(&podev->cdev);
exit_destroy_device:
	device_destroy(driver_class, qcedev_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qcedev_device_no, 1);
	podev->icc_path = NULL;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;
	return rc;
}
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	if (podev->qce)
		qce_close(podev->qce);
	if (podev->icc_path)
		icc_put(podev->icc_path);
	tasklet_kill(&podev->done_tasklet);

	cdev_del(&podev->cdev);
	device_destroy(driver_class, qcedev_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(qcedev_device_no, 1);
	return 0;
}
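
/*
 * Suspend/resume only gate the clocks when a high-bandwidth vote is
 * outstanding, and both deliberately return 0 so a clock-control
 * failure never blocks the system power transition.
 */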
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}
static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.of_match_table = qcedev_match,
	},
};
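
/*
 * debugfs statistics support: _disp_stats() renders the per-driver
 * success/failure counters into _debug_read_buf for the "stats-1"
 * file created in _qcedev_debug_init().
 */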
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
				id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation success : %d\n",
					pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation fail : %d\n",
					pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation success : %d\n",
					pstat->qcedev_dec_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation fail : %d\n",
					pstat->qcedev_dec_fail);

	return len;
}
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
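
/*
 * Create the "qcedev" debugfs directory with a single "stats-1" node:
 * reading it dumps the counters, writing anything resets them.
 */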
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
			&_debug_qcedev, &_debug_stats_ops);
	if (IS_ERR_OR_NULL(dent)) {
		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
				PTR_ERR(dent));
		/* PTR_ERR(NULL) is 0, so map a NULL return to a real error */
		rc = dent ? PTR_ERR(dent) : -ENOMEM;
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}
static int qcedev_init(void)
{
	_qcedev_debug_init();
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");

module_init(qcedev_init);
module_exit(qcedev_exit);