qcedev.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI CE device driver.
  4. *
  5. * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
  6. */
  7. #include <linux/mman.h>
  8. #include <linux/module.h>
  9. #include <linux/device.h>
  10. #include <linux/types.h>
  11. #include <linux/platform_device.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/kernel.h>
  14. #include <linux/dmapool.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/init.h>
  18. #include <linux/module.h>
  19. #include <linux/fs.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/debugfs.h>
  22. #include <linux/scatterlist.h>
  23. #include <linux/crypto.h>
  24. #include "linux/platform_data/qcom_crypto_device.h"
  25. #include "linux/qcedev.h"
  26. #include <linux/interconnect.h>
  27. #include <crypto/hash.h>
  28. #include "qcedevi.h"
  29. #include "qce.h"
  30. #include "qcedev_smmu.h"
  31. #include "compat_qcedev.h"
  32. #include <linux/compat.h>
  33. #define CACHE_LINE_SIZE 64
  34. #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
  35. #define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)
  36. /* Max wait time once a crypto request is done */
  37. #define MAX_CRYPTO_WAIT_TIME 1500
  38. static uint8_t _std_init_vector_sha1_uint8[] = {
  39. 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
  40. 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
  41. 0xC3, 0xD2, 0xE1, 0xF0
  42. };
  43. /* standard initialization vector for SHA-256, source: FIPS 180-2 */
  44. static uint8_t _std_init_vector_sha256_uint8[] = {
  45. 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
  46. 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
  47. 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
  48. 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
  49. };
  50. #define QCEDEV_CTX_KEY_MASK 0x000000ff
  51. #define QCEDEV_CTX_USE_HW_KEY 0x00000001
  52. #define QCEDEV_CTX_USE_PIPE_KEY 0x00000002
  53. // Key timer expiry for pipes 1-15 (Status3)
  54. #define PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK 0x000000FF
  55. // Key timer expiry for pipes 16-19 (Status6)
  56. #define PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK 0x00000003
  57. // Key pause for pipes 1-15 (Status3)
  58. #define PIPE_KEY_PAUSE_STATUS3_MASK 0xFF0000
  59. // Key pause for pipes 16-19 (Status6)
  60. #define PIPE_KEY_PAUSE_STATUS6_MASK 0x30000
  61. #define QCEDEV_STATUS1_ERR_INTR_MASK 0x10
  62. static DEFINE_MUTEX(send_cmd_lock);
  63. static DEFINE_MUTEX(qcedev_sent_bw_req);
  64. static DEFINE_MUTEX(hash_access_lock);
  65. static dev_t qcedev_device_no;
  66. static struct class *driver_class;
  67. static struct device *class_dev;
  68. static const struct of_device_id qcedev_match[] = {
  69. { .compatible = "qcom,qcedev"},
  70. { .compatible = "qcom,qcedev,context-bank"},
  71. {}
  72. };
  73. MODULE_DEVICE_TABLE(of, qcedev_match);
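/*
 * Enable or disable the CE clocks and the interconnect bandwidth vote as a
 * pair. The ordering (bandwidth first vs. clock first) follows
 * podev->ce_support.req_bw_before_clk, and a failure in the second step
 * rolls back the first.
 */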
  74. static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
  75. {
  76. unsigned int control_flag;
  77. int ret = 0;
  78. if (podev->ce_support.req_bw_before_clk) {
  79. if (enable)
  80. control_flag = QCE_BW_REQUEST_FIRST;
  81. else
  82. control_flag = QCE_CLK_DISABLE_FIRST;
  83. } else {
  84. if (enable)
  85. control_flag = QCE_CLK_ENABLE_FIRST;
  86. else
  87. control_flag = QCE_BW_REQUEST_RESET_FIRST;
  88. }
  89. switch (control_flag) {
  90. case QCE_CLK_ENABLE_FIRST:
  91. ret = qce_enable_clk(podev->qce);
  92. if (ret) {
  93. pr_err("%s Unable to enable clk\n", __func__);
  94. return ret;
  95. }
  96. ret = icc_set_bw(podev->icc_path,
  97. CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
  98. if (ret) {
  99. pr_err("%s Unable to set high bw\n", __func__);
  100. ret = qce_disable_clk(podev->qce);
  101. if (ret)
  102. pr_err("%s Unable to disable clk\n", __func__);
  103. return ret;
  104. }
  105. break;
  106. case QCE_BW_REQUEST_FIRST:
  107. ret = icc_set_bw(podev->icc_path,
  108. CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
  109. if (ret) {
  110. pr_err("%s Unable to set high bw\n", __func__);
  111. return ret;
  112. }
  113. ret = qce_enable_clk(podev->qce);
  114. if (ret) {
  115. pr_err("%s Unable to enable clk\n", __func__);
  116. ret = icc_set_bw(podev->icc_path, 0, 0);
  117. if (ret)
  118. pr_err("%s Unable to set low bw\n", __func__);
  119. return ret;
  120. }
  121. break;
  122. case QCE_CLK_DISABLE_FIRST:
  123. ret = qce_disable_clk(podev->qce);
  124. if (ret) {
  125. pr_err("%s Unable to disable clk\n", __func__);
  126. return ret;
  127. }
  128. ret = icc_set_bw(podev->icc_path, 0, 0);
  129. if (ret) {
  130. pr_err("%s Unable to set low bw\n", __func__);
  131. ret = qce_enable_clk(podev->qce);
  132. if (ret)
  133. pr_err("%s Unable to enable clk\n", __func__);
  134. return ret;
  135. }
  136. break;
  137. case QCE_BW_REQUEST_RESET_FIRST:
  138. ret = icc_set_bw(podev->icc_path, 0, 0);
  139. if (ret) {
  140. pr_err("%s Unable to set low bw\n", __func__);
  141. return ret;
  142. }
  143. ret = qce_disable_clk(podev->qce);
  144. if (ret) {
  145. pr_err("%s Unable to disable clk\n", __func__);
  146. ret = icc_set_bw(podev->icc_path,
  147. CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
  148. if (ret)
  149. pr_err("%s Unable to set high bw\n", __func__);
  150. return ret;
  151. }
  152. break;
  153. default:
  154. return -ENOENT;
  155. }
  156. return 0;
  157. }
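/*
 * Reference-counted high-bandwidth request: the first requester turns the
 * clocks and bandwidth vote on, the last release turns them off.
 * Serialized by the qcedev_sent_bw_req mutex.
 */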
  158. static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
  159. bool high_bw_req)
  160. {
  161. int ret = 0;
  162. mutex_lock(&qcedev_sent_bw_req);
  163. if (high_bw_req) {
  164. if (podev->high_bw_req_count == 0) {
  165. ret = qcedev_control_clocks(podev, true);
  166. if (ret)
  167. goto exit_unlock_mutex;
  168. }
  169. podev->high_bw_req_count++;
  170. } else {
  171. if (podev->high_bw_req_count == 1) {
  172. ret = qcedev_control_clocks(podev, false);
  173. if (ret)
  174. goto exit_unlock_mutex;
  175. }
  176. podev->high_bw_req_count--;
  177. }
  178. exit_unlock_mutex:
  179. mutex_unlock(&qcedev_sent_bw_req);
  180. }
  181. #define QCEDEV_MAGIC 0x56434544 /* "qced" */
  182. static int qcedev_open(struct inode *inode, struct file *file);
  183. static int qcedev_release(struct inode *inode, struct file *file);
  184. static int start_cipher_req(struct qcedev_control *podev,
  185. int *current_req_info);
  186. static int start_offload_cipher_req(struct qcedev_control *podev,
  187. int *current_req_info);
  188. static int start_sha_req(struct qcedev_control *podev,
  189. int *current_req_info);
  190. static const struct file_operations qcedev_fops = {
  191. .owner = THIS_MODULE,
  192. .unlocked_ioctl = qcedev_ioctl,
  193. #ifdef CONFIG_COMPAT
  194. .compat_ioctl = compat_qcedev_ioctl,
  195. #endif
  196. .open = qcedev_open,
  197. .release = qcedev_release,
  198. };
  199. static struct qcedev_control qce_dev[] = {
  200. {
  201. .magic = QCEDEV_MAGIC,
  202. },
  203. };
  204. #define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
  205. #define DEBUG_MAX_FNAME 16
  206. #define DEBUG_MAX_RW_BUF 1024
  207. struct qcedev_stat {
  208. u32 qcedev_dec_success;
  209. u32 qcedev_dec_fail;
  210. u32 qcedev_enc_success;
  211. u32 qcedev_enc_fail;
  212. u32 qcedev_sha_success;
  213. u32 qcedev_sha_fail;
  214. };
  215. static struct qcedev_stat _qcedev_stat;
  216. static struct dentry *_debug_dent;
  217. static char _debug_read_buf[DEBUG_MAX_RW_BUF];
  218. static int _debug_qcedev;
  219. static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
  220. {
  221. int i;
  222. for (i = 0; i < MAX_QCE_DEVICE; i++) {
  223. if (qce_dev[i].minor == n)
  224. return &qce_dev[i];
  225. }
  226. return NULL;
  227. }
  228. static int qcedev_open(struct inode *inode, struct file *file)
  229. {
  230. struct qcedev_handle *handle;
  231. struct qcedev_control *podev;
  232. podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
  233. if (podev == NULL) {
  234. pr_err("%s: no such device %d\n", __func__,
  235. MINOR(inode->i_rdev));
  236. return -ENOENT;
  237. }
  238. handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
  239. if (handle == NULL)
  240. return -ENOMEM;
  241. handle->cntl = podev;
  242. file->private_data = handle;
  243. qcedev_ce_high_bw_req(podev, true);
  244. mutex_init(&handle->registeredbufs.lock);
  245. INIT_LIST_HEAD(&handle->registeredbufs.list);
  246. return 0;
  247. }
  248. static int qcedev_release(struct inode *inode, struct file *file)
  249. {
  250. struct qcedev_control *podev;
  251. struct qcedev_handle *handle;
  252. handle = file->private_data;
  253. podev = handle->cntl;
  254. if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
  255. pr_err("%s: invalid handle %pK\n",
  256. __func__, podev);
  257. }
  258. qcedev_ce_high_bw_req(podev, false);
  259. if (qcedev_unmap_all_buffers(handle))
  260. pr_err("%s: failed to unmap all ion buffers\n", __func__);
  261. kfree_sensitive(handle);
  262. file->private_data = NULL;
  263. return 0;
  264. }
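/*
 * Completion handler, presumably wired to podev->done_tasklet (which the qce
 * callbacks below schedule): complete the finished request and immediately
 * start the next command queued on podev->ready_commands, retrying if that
 * start fails.
 */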
  265. static void req_done(unsigned long data)
  266. {
  267. struct qcedev_control *podev = (struct qcedev_control *)data;
  268. struct qcedev_async_req *areq;
  269. unsigned long flags = 0;
  270. struct qcedev_async_req *new_req = NULL;
  271. int ret = 0;
  272. int current_req_info = 0;
  273. spin_lock_irqsave(&podev->lock, flags);
  274. areq = podev->active_command;
  275. podev->active_command = NULL;
  276. again:
  277. if (!list_empty(&podev->ready_commands)) {
  278. new_req = container_of(podev->ready_commands.next,
  279. struct qcedev_async_req, list);
  280. list_del(&new_req->list);
  281. podev->active_command = new_req;
  282. new_req->err = 0;
  283. if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
  284. ret = start_cipher_req(podev, &current_req_info);
  285. else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
  286. ret = start_offload_cipher_req(podev, &current_req_info);
  287. else
  288. ret = start_sha_req(podev, &current_req_info);
  289. }
  290. spin_unlock_irqrestore(&podev->lock, flags);
  291. if (areq)
  292. complete(&areq->complete);
  293. if (new_req && ret) {
  294. complete(&new_req->complete);
  295. spin_lock_irqsave(&podev->lock, flags);
  296. podev->active_command = NULL;
  297. areq = NULL;
  298. ret = 0;
  299. new_req = NULL;
  300. goto again;
  301. }
  302. }
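/*
 * qce completion callback for SHA requests: copy the returned digest and
 * byte-count words into the handle's sha_ctxt and schedule the done tasklet.
 */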
  303. void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
  304. unsigned char *authdata, int ret)
  305. {
  306. struct qcedev_sha_req *areq;
  307. struct qcedev_control *pdev;
  308. struct qcedev_handle *handle;
  309. uint32_t *auth32 = (uint32_t *)authdata;
  310. areq = (struct qcedev_sha_req *) cookie;
  311. if (!areq || !areq->cookie)
  312. return;
  313. handle = (struct qcedev_handle *) areq->cookie;
  314. pdev = handle->cntl;
  315. if (!pdev)
  316. return;
  317. if (digest)
  318. memcpy(&handle->sha_ctxt.digest[0], digest, 32);
  319. if (authdata) {
  320. handle->sha_ctxt.auth_data[0] = auth32[0];
  321. handle->sha_ctxt.auth_data[1] = auth32[1];
  322. }
  323. tasklet_schedule(&pdev->done_tasklet);
  324. };
  325. void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
  326. unsigned char *iv, int ret)
  327. {
  328. struct qcedev_cipher_req *areq;
  329. struct qcedev_handle *handle;
  330. struct qcedev_control *podev;
  331. struct qcedev_async_req *qcedev_areq;
  332. areq = (struct qcedev_cipher_req *) cookie;
  333. if (!areq || !areq->cookie)
  334. return;
  335. handle = (struct qcedev_handle *) areq->cookie;
  336. podev = handle->cntl;
  337. if (!podev)
  338. return;
  339. qcedev_areq = podev->active_command;
  340. if (iv)
  341. memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
  342. qcedev_areq->cipher_op_req.ivlen);
  343. tasklet_schedule(&podev->done_tasklet);
  344. };
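/*
 * Translate the user-space cipher request held in podev->active_command into
 * a struct qce_req and submit it to the QCE core. PMEM sources are rejected,
 * AES-CTR always runs as an encrypt operation, and a zero-length key is only
 * accepted for the no-key operations or when the hardware-fused key is used.
 */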
  345. static int start_cipher_req(struct qcedev_control *podev,
  346. int *current_req_info)
  347. {
  348. struct qcedev_async_req *qcedev_areq;
  349. struct qce_req creq;
  350. int ret = 0;
  351. /* start the command on the podev->active_command */
  352. qcedev_areq = podev->active_command;
  353. qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
  354. if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
  355. pr_err("%s: Use of PMEM is not supported\n", __func__);
  356. goto unsupported;
  357. }
  358. creq.pmem = NULL;
  359. switch (qcedev_areq->cipher_op_req.alg) {
  360. case QCEDEV_ALG_DES:
  361. creq.alg = CIPHER_ALG_DES;
  362. break;
  363. case QCEDEV_ALG_3DES:
  364. creq.alg = CIPHER_ALG_3DES;
  365. break;
  366. case QCEDEV_ALG_AES:
  367. creq.alg = CIPHER_ALG_AES;
  368. break;
  369. default:
  370. return -EINVAL;
  371. }
  372. switch (qcedev_areq->cipher_op_req.mode) {
  373. case QCEDEV_AES_MODE_CBC:
  374. case QCEDEV_DES_MODE_CBC:
  375. creq.mode = QCE_MODE_CBC;
  376. break;
  377. case QCEDEV_AES_MODE_ECB:
  378. case QCEDEV_DES_MODE_ECB:
  379. creq.mode = QCE_MODE_ECB;
  380. break;
  381. case QCEDEV_AES_MODE_CTR:
  382. creq.mode = QCE_MODE_CTR;
  383. break;
  384. case QCEDEV_AES_MODE_XTS:
  385. creq.mode = QCE_MODE_XTS;
  386. break;
  387. default:
  388. return -EINVAL;
  389. }
  390. if ((creq.alg == CIPHER_ALG_AES) &&
  391. (creq.mode == QCE_MODE_CTR)) {
  392. creq.dir = QCE_ENCRYPT;
  393. } else {
  394. if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
  395. creq.dir = QCE_ENCRYPT;
  396. else
  397. creq.dir = QCE_DECRYPT;
  398. }
  399. creq.iv = &qcedev_areq->cipher_op_req.iv[0];
  400. creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
  401. creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
  402. creq.encklen = qcedev_areq->cipher_op_req.encklen;
  403. creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
  404. if (qcedev_areq->cipher_op_req.encklen == 0) {
  405. if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
  406. || (qcedev_areq->cipher_op_req.op ==
  407. QCEDEV_OPER_DEC_NO_KEY))
  408. creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
  409. else {
  410. int i;
  411. for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
  412. if (qcedev_areq->cipher_op_req.enckey[i] != 0)
  413. break;
  414. }
  415. if ((podev->platform_support.hw_key_support == 1) &&
  416. (i == QCEDEV_MAX_KEY_SIZE))
  417. creq.op = QCE_REQ_ABLK_CIPHER;
  418. else {
  419. ret = -EINVAL;
  420. goto unsupported;
  421. }
  422. }
  423. } else {
  424. creq.op = QCE_REQ_ABLK_CIPHER;
  425. }
  426. creq.qce_cb = qcedev_cipher_req_cb;
  427. creq.areq = (void *)&qcedev_areq->cipher_req;
  428. creq.flags = 0;
  429. creq.offload_op = 0;
  430. ret = qce_ablk_cipher_req(podev->qce, &creq);
  431. *current_req_info = creq.current_req_info;
  432. unsupported:
  433. qcedev_areq->err = ret ? -ENXIO : 0;
  434. return ret;
  435. };
  436. void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
  437. unsigned char *iv, int ret)
  438. {
  439. struct qcedev_cipher_req *areq;
  440. struct qcedev_handle *handle;
  441. struct qcedev_control *podev;
  442. struct qcedev_async_req *qcedev_areq;
  443. areq = (struct qcedev_cipher_req *) cookie;
  444. if (!areq || !areq->cookie)
  445. return;
  446. handle = (struct qcedev_handle *) areq->cookie;
  447. podev = handle->cntl;
  448. if (!podev)
  449. return;
  450. qcedev_areq = podev->active_command;
  451. if (iv)
  452. memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
  453. qcedev_areq->offload_cipher_op_req.ivlen);
  454. tasklet_schedule(&podev->done_tasklet);
  455. }
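/*
 * Build and submit an offload (pipe-key) cipher request. The keys are owned
 * by the secure pipe, so only the IV, direction, pattern and copy-op
 * settings are programmed here.
 */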
  456. static int start_offload_cipher_req(struct qcedev_control *podev,
  457. int *current_req_info)
  458. {
  459. struct qcedev_async_req *qcedev_areq;
  460. struct qce_req creq;
  461. u8 patt_sz = 0, proc_data_sz = 0;
  462. int ret = 0;
  463. memset(&creq, 0, sizeof(creq));
  464. /* Start the command on the podev->active_command */
  465. qcedev_areq = podev->active_command;
  466. qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
  467. switch (qcedev_areq->offload_cipher_op_req.alg) {
  468. case QCEDEV_ALG_AES:
  469. creq.alg = CIPHER_ALG_AES;
  470. break;
  471. default:
  472. return -EINVAL;
  473. }
  474. switch (qcedev_areq->offload_cipher_op_req.mode) {
  475. case QCEDEV_AES_MODE_CBC:
  476. creq.mode = QCE_MODE_CBC;
  477. break;
  478. case QCEDEV_AES_MODE_CTR:
  479. creq.mode = QCE_MODE_CTR;
  480. break;
  481. default:
  482. return -EINVAL;
  483. }
  484. if (qcedev_areq->offload_cipher_op_req.is_copy_op) {
  485. creq.dir = QCE_ENCRYPT;
  486. } else {
  487. switch(qcedev_areq->offload_cipher_op_req.op) {
  488. case QCEDEV_OFFLOAD_HLOS_HLOS:
  489. case QCEDEV_OFFLOAD_HLOS_CPB:
  490. creq.dir = QCE_DECRYPT;
  491. break;
  492. case QCEDEV_OFFLOAD_CPB_HLOS:
  493. creq.dir = QCE_ENCRYPT;
  494. break;
  495. default:
  496. return -EINVAL;
  497. }
  498. }
  499. creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
  500. creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
  501. creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
  502. creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;
  503. /* OFFLOAD use cases use PIPE keys so no need to set keys */
  504. creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
  505. creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
  506. creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
  507. if (qcedev_areq->offload_cipher_op_req.is_copy_op)
  508. creq.is_copy_op = true;
  509. creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;
  510. creq.qce_cb = qcedev_offload_cipher_req_cb;
  511. creq.areq = (void *)&qcedev_areq->cipher_req;
  512. patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
  513. proc_data_sz =
  514. qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
  515. creq.is_pattern_valid =
  516. qcedev_areq->offload_cipher_op_req.is_pattern_valid;
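/*
 * Packed pattern descriptor handed to the QCE core: bit 0 enables the
 * pattern, bits 4-7 carry (pattern size - 1), bits 8-11 carry
 * (processed data size - 1), and the pattern offset starts at bit 12.
 */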
  517. if (creq.is_pattern_valid) {
  518. creq.pattern_info = 0x1;
  519. if (patt_sz)
  520. creq.pattern_info |= (patt_sz - 1) << 4;
  521. if (proc_data_sz)
  522. creq.pattern_info |= (proc_data_sz - 1) << 8;
  523. creq.pattern_info |=
  524. qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
  525. }
  526. creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;
  527. ret = qce_ablk_cipher_req(podev->qce, &creq);
  528. *current_req_info = creq.current_req_info;
  529. qcedev_areq->err = ret ? -ENXIO : 0;
  530. return ret;
  531. }
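/*
 * Submit the active SHA/HMAC/CMAC request to the QCE core, seeding it with
 * the running digest and byte counts kept in handle->sha_ctxt. HMAC falls
 * back to plain SHA when the engine lacks sha_hmac support.
 */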
  532. static int start_sha_req(struct qcedev_control *podev,
  533. int *current_req_info)
  534. {
  535. struct qcedev_async_req *qcedev_areq;
  536. struct qce_sha_req sreq;
  537. int ret = 0;
  538. struct qcedev_handle *handle;
  539. /* start the command on the podev->active_command */
  540. qcedev_areq = podev->active_command;
  541. handle = qcedev_areq->handle;
  542. switch (qcedev_areq->sha_op_req.alg) {
  543. case QCEDEV_ALG_SHA1:
  544. sreq.alg = QCE_HASH_SHA1;
  545. break;
  546. case QCEDEV_ALG_SHA256:
  547. sreq.alg = QCE_HASH_SHA256;
  548. break;
  549. case QCEDEV_ALG_SHA1_HMAC:
  550. if (podev->ce_support.sha_hmac) {
  551. sreq.alg = QCE_HASH_SHA1_HMAC;
  552. sreq.authkey = &handle->sha_ctxt.authkey[0];
  553. sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
  554. } else {
  555. sreq.alg = QCE_HASH_SHA1;
  556. sreq.authkey = NULL;
  557. }
  558. break;
  559. case QCEDEV_ALG_SHA256_HMAC:
  560. if (podev->ce_support.sha_hmac) {
  561. sreq.alg = QCE_HASH_SHA256_HMAC;
  562. sreq.authkey = &handle->sha_ctxt.authkey[0];
  563. sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
  564. } else {
  565. sreq.alg = QCE_HASH_SHA256;
  566. sreq.authkey = NULL;
  567. }
  568. break;
  569. case QCEDEV_ALG_AES_CMAC:
  570. sreq.alg = QCE_HASH_AES_CMAC;
  571. sreq.authkey = &handle->sha_ctxt.authkey[0];
  572. sreq.authklen = qcedev_areq->sha_op_req.authklen;
  573. break;
  574. default:
  575. pr_err("Algorithm %d not supported, exiting\n",
  576. qcedev_areq->sha_op_req.alg);
  577. return -EINVAL;
  578. }
  579. qcedev_areq->sha_req.cookie = handle;
  580. sreq.qce_cb = qcedev_sha_req_cb;
  581. if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
  582. sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
  583. sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
  584. sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
  585. sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
  586. sreq.digest = &handle->sha_ctxt.digest[0];
  587. sreq.first_blk = handle->sha_ctxt.first_blk;
  588. sreq.last_blk = handle->sha_ctxt.last_blk;
  589. }
  590. sreq.size = qcedev_areq->sha_req.sreq.nbytes;
  591. sreq.src = qcedev_areq->sha_req.sreq.src;
  592. sreq.areq = (void *)&qcedev_areq->sha_req;
  593. sreq.flags = 0;
  594. ret = qce_process_sha_req(podev->qce, &sreq);
  595. *current_req_info = sreq.current_req_info;
  596. qcedev_areq->err = ret ? -ENXIO : 0;
  597. return ret;
  598. };
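/*
 * Read back the engine status registers (typically after a timeout) and map
 * key-timer expiry, key-pause and generic error bits onto the offload error
 * codes reported back to user space.
 */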
  599. static void qcedev_check_crypto_status(
  600. struct qcedev_async_req *qcedev_areq, void *handle,
  601. bool print_err)
  602. {
  603. unsigned int s1, s2, s3, s4, s5, s6;
  604. qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
  605. qce_get_crypto_status(handle, &s1, &s2, &s3, &s4, &s5, &s6);
  606. if (print_err) {
  607. pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
  608. s1, s2, s3, s4, s5, s6);
  609. }
  610. // Check for key timer expiry
  611. if ((s6 & PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK) ||
  612. (s3 & PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK)) {
  613. pr_info("%s: crypto timer expired\n", __func__);
  614. pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
  615. s1, s2, s3, s4, s5, s6);
  616. qcedev_areq->offload_cipher_op_req.err =
  617. QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR;
  618. return;
  619. }
  620. // Check for key pause
  621. if ((s6 & PIPE_KEY_PAUSE_STATUS6_MASK) ||
  622. (s3 & PIPE_KEY_PAUSE_STATUS3_MASK)) {
  623. pr_info("%s: crypto key paused\n", __func__);
  624. pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
  625. s1, s2, s3, s4, s5, s6);
  626. qcedev_areq->offload_cipher_op_req.err =
  627. QCEDEV_OFFLOAD_KEY_PAUSE_ERROR;
  628. return;
  629. }
  630. // Check for generic error
  631. if (s1 & QCEDEV_STATUS1_ERR_INTR_MASK) {
  632. pr_err("%s: generic crypto error\n", __func__);
  633. pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__,
  634. s1, s2, s3, s4, s5, s6);
  635. qcedev_areq->offload_cipher_op_req.err =
  636. QCEDEV_OFFLOAD_GENERIC_ERROR;
  637. return;
  638. }
  639. }
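/*
 * Queue a request on the controller: start it immediately if the engine is
 * idle, otherwise append it to ready_commands, then wait up to
 * MAX_CRYPTO_WAIT_TIME ms for the completion callback. On timeout the engine
 * status is checked and qce is asked to clean up; the debugfs statistics are
 * updated on the way out.
 */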
  640. static int submit_req(struct qcedev_async_req *qcedev_areq,
  641. struct qcedev_handle *handle)
  642. {
  643. struct qcedev_control *podev;
  644. unsigned long flags = 0;
  645. int ret = 0;
  646. struct qcedev_stat *pstat;
  647. int current_req_info = 0;
  648. int wait = 0;
  649. bool print_sts = false;
  650. qcedev_areq->err = 0;
  651. podev = handle->cntl;
  652. spin_lock_irqsave(&podev->lock, flags);
  653. if (podev->active_command == NULL) {
  654. podev->active_command = qcedev_areq;
  655. if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
  656. ret = start_cipher_req(podev, &current_req_info);
  657. else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER)
  658. ret = start_offload_cipher_req(podev, &current_req_info);
  659. else
  660. ret = start_sha_req(podev, &current_req_info);
  661. } else {
  662. list_add_tail(&qcedev_areq->list, &podev->ready_commands);
  663. }
  664. if (ret != 0)
  665. podev->active_command = NULL;
  666. spin_unlock_irqrestore(&podev->lock, flags);
  667. if (ret == 0)
  668. wait = wait_for_completion_timeout(&qcedev_areq->complete,
  669. msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME));
  670. if (!wait) {
  671. /*
  672. * This means wait timed out, and the callback routine was not
  673. * exercised. The callback sequence does some housekeeping which
  674. * would be missed here, hence having a call to qce here to do
  675. * that.
  676. */
  677. pr_err("%s: wait timed out, req info = %d\n", __func__,
  678. current_req_info);
  679. print_sts = true;
  680. qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts);
  681. qce_manage_timeout(podev->qce, current_req_info);
  682. if (qcedev_areq->offload_cipher_op_req.err !=
  683. QCEDEV_OFFLOAD_NO_ERROR)
  684. return 0;
  685. }
  686. if (ret)
  687. qcedev_areq->err = -EIO;
  688. pstat = &_qcedev_stat;
  689. if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
  690. switch (qcedev_areq->cipher_op_req.op) {
  691. case QCEDEV_OPER_DEC:
  692. if (qcedev_areq->err)
  693. pstat->qcedev_dec_fail++;
  694. else
  695. pstat->qcedev_dec_success++;
  696. break;
  697. case QCEDEV_OPER_ENC:
  698. if (qcedev_areq->err)
  699. pstat->qcedev_enc_fail++;
  700. else
  701. pstat->qcedev_enc_success++;
  702. break;
  703. default:
  704. break;
  705. }
  706. } else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
  707. //Do nothing
  708. } else {
  709. if (qcedev_areq->err)
  710. pstat->qcedev_sha_fail++;
  711. else
  712. pstat->qcedev_sha_success++;
  713. }
  714. return qcedev_areq->err;
  715. }
  716. static int qcedev_sha_init(struct qcedev_async_req *areq,
  717. struct qcedev_handle *handle)
  718. {
  719. struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
  720. memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
  721. sha_ctxt->first_blk = 1;
  722. if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
  723. (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
  724. memcpy(&sha_ctxt->digest[0],
  725. &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
  726. sha_ctxt->diglen = SHA1_DIGEST_SIZE;
  727. } else {
  728. if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
  729. (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
  730. memcpy(&sha_ctxt->digest[0],
  731. &_std_init_vector_sha256_uint8[0],
  732. SHA256_DIGEST_SIZE);
  733. sha_ctxt->diglen = SHA256_DIGEST_SIZE;
  734. }
  735. }
  736. sha_ctxt->init_done = true;
  737. return 0;
  738. }
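/*
 * Hash up to QCE_MAX_OPER_DATA bytes. Data shorter than one SHA block is
 * only accumulated in the trailing buffer; otherwise the user fragments
 * (plus any previous trailing bytes) are copied into a cache-line-aligned
 * kernel buffer, the block-aligned portion is hashed, and the remainder is
 * saved as the new trailing buffer.
 */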
  739. static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
  740. struct qcedev_handle *handle,
  741. struct scatterlist *sg_src)
  742. {
  743. int err = 0;
  744. int i = 0;
  745. uint32_t total;
  746. uint8_t *user_src = NULL;
  747. uint8_t *k_src = NULL;
  748. uint8_t *k_buf_src = NULL;
  749. uint8_t *k_align_src = NULL;
  750. uint32_t sha_pad_len = 0;
  751. uint32_t trailing_buf_len = 0;
  752. uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
  753. uint32_t sha_block_size;
  754. total = qcedev_areq->sha_op_req.data_len + t_buf;
  755. if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
  756. sha_block_size = SHA1_BLOCK_SIZE;
  757. else
  758. sha_block_size = SHA256_BLOCK_SIZE;
  759. if (total <= sha_block_size) {
  760. uint32_t len = qcedev_areq->sha_op_req.data_len;
  761. i = 0;
  762. k_src = &handle->sha_ctxt.trailing_buf[t_buf];
  763. /* Copy data from user src(s) */
  764. while (len > 0) {
  765. user_src = qcedev_areq->sha_op_req.data[i].vaddr;
  766. if (user_src && copy_from_user(k_src,
  767. (void __user *)user_src,
  768. qcedev_areq->sha_op_req.data[i].len))
  769. return -EFAULT;
  770. len -= qcedev_areq->sha_op_req.data[i].len;
  771. k_src += qcedev_areq->sha_op_req.data[i].len;
  772. i++;
  773. }
  774. handle->sha_ctxt.trailing_buf_len = total;
  775. return 0;
  776. }
  777. k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
  778. GFP_KERNEL);
  779. if (k_buf_src == NULL)
  780. return -ENOMEM;
  781. k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
  782. CACHE_LINE_SIZE);
  783. k_src = k_align_src;
  784. /* check for trailing buffer from previous updates and append it */
  785. if (t_buf > 0) {
  786. memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
  787. t_buf);
  788. k_src += t_buf;
  789. }
  790. /* Copy data from user src(s) */
  791. user_src = qcedev_areq->sha_op_req.data[0].vaddr;
  792. if (user_src && copy_from_user(k_src,
  793. (void __user *)user_src,
  794. qcedev_areq->sha_op_req.data[0].len)) {
  795. memset(k_buf_src, 0, ksize((void *)k_buf_src));
  796. kfree(k_buf_src);
  797. return -EFAULT;
  798. }
  799. k_src += qcedev_areq->sha_op_req.data[0].len;
  800. for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
  801. user_src = qcedev_areq->sha_op_req.data[i].vaddr;
  802. if (user_src && copy_from_user(k_src,
  803. (void __user *)user_src,
  804. qcedev_areq->sha_op_req.data[i].len)) {
  805. memset(k_buf_src, 0, ksize((void *)k_buf_src));
  806. kfree(k_buf_src);
  807. return -EFAULT;
  808. }
  809. k_src += qcedev_areq->sha_op_req.data[i].len;
  810. }
  811. /* get new trailing buffer */
  812. sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
  813. trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;
  814. qcedev_areq->sha_req.sreq.src = sg_src;
  815. sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src,
  816. total-trailing_buf_len);
  817. qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
  818. /* update sha_ctxt trailing buf content to new trailing buf */
  819. if (trailing_buf_len > 0) {
  820. memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
  821. memcpy(&handle->sha_ctxt.trailing_buf[0],
  822. (k_src - trailing_buf_len),
  823. trailing_buf_len);
  824. }
  825. handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
  826. err = submit_req(qcedev_areq, handle);
  827. handle->sha_ctxt.last_blk = 0;
  828. handle->sha_ctxt.first_blk = 0;
  829. memset(k_buf_src, 0, ksize((void *)k_buf_src));
  830. kfree(k_buf_src);
  831. return err;
  832. }
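/*
 * Split an update larger than QCE_MAX_OPER_DATA into chunks, rewriting the
 * data[] entries in sha_op_req for each pass and restoring the caller's
 * original request once all chunks have been hashed.
 */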
  833. static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
  834. struct qcedev_handle *handle,
  835. struct scatterlist *sg_src)
  836. {
  837. int err = 0;
  838. int i = 0;
  839. int j = 0;
  840. int k = 0;
  841. int num_entries = 0;
  842. uint32_t total = 0;
  843. if (!handle->sha_ctxt.init_done) {
  844. pr_err("%s Init was not called\n", __func__);
  845. return -EINVAL;
  846. }
  847. if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
  848. struct qcedev_sha_op_req *saved_req;
  849. struct qcedev_sha_op_req req;
  850. struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
  851. /* save the original req structure */
  852. saved_req =
  853. kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
  854. if (saved_req == NULL) {
  855. pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
  856. __func__, (uintptr_t)saved_req);
  857. return -ENOMEM;
  858. }
  859. memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
  860. memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
  861. i = 0;
  862. /* Address 32 KB at a time */
  863. while ((i < req.entries) && (err == 0)) {
  864. if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
  865. sreq->data[0].len = QCE_MAX_OPER_DATA;
  866. if (i > 0) {
  867. sreq->data[0].vaddr =
  868. sreq->data[i].vaddr;
  869. }
  870. sreq->data_len = QCE_MAX_OPER_DATA;
  871. sreq->entries = 1;
  872. err = qcedev_sha_update_max_xfer(qcedev_areq,
  873. handle, sg_src);
  874. sreq->data[i].len = req.data[i].len -
  875. QCE_MAX_OPER_DATA;
  876. sreq->data[i].vaddr = req.data[i].vaddr +
  877. QCE_MAX_OPER_DATA;
  878. req.data[i].vaddr = sreq->data[i].vaddr;
  879. req.data[i].len = sreq->data[i].len;
  880. } else {
  881. total = 0;
  882. for (j = i; j < req.entries; j++) {
  883. num_entries++;
  884. if ((total + sreq->data[j].len) >=
  885. QCE_MAX_OPER_DATA) {
  886. sreq->data[j].len =
  887. (QCE_MAX_OPER_DATA - total);
  888. total = QCE_MAX_OPER_DATA;
  889. break;
  890. }
  891. total += sreq->data[j].len;
  892. }
  893. sreq->data_len = total;
  894. if (i > 0)
  895. for (k = 0; k < num_entries; k++) {
  896. sreq->data[k].len =
  897. sreq->data[i+k].len;
  898. sreq->data[k].vaddr =
  899. sreq->data[i+k].vaddr;
  900. }
  901. sreq->entries = num_entries;
  902. i = j;
  903. err = qcedev_sha_update_max_xfer(qcedev_areq,
  904. handle, sg_src);
  905. num_entries = 0;
  906. sreq->data[i].vaddr = req.data[i].vaddr +
  907. sreq->data[i].len;
  908. sreq->data[i].len = req.data[i].len -
  909. sreq->data[i].len;
  910. req.data[i].vaddr = sreq->data[i].vaddr;
  911. req.data[i].len = sreq->data[i].len;
  912. if (sreq->data[i].len == 0)
  913. i++;
  914. }
  915. } /* end of while ((i < req.entries) && (err == 0)) */
  916. /* Restore the original req structure */
  917. for (i = 0; i < saved_req->entries; i++) {
  918. sreq->data[i].len = saved_req->data[i].len;
  919. sreq->data[i].vaddr = saved_req->data[i].vaddr;
  920. }
  921. sreq->entries = saved_req->entries;
  922. sreq->data_len = saved_req->data_len;
  923. memset(saved_req, 0, ksize((void *)saved_req));
  924. kfree(saved_req);
  925. } else
  926. err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
  927. return err;
  928. }
  929. static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
  930. struct qcedev_handle *handle)
  931. {
  932. int err = 0;
  933. struct scatterlist sg_src;
  934. uint32_t total;
  935. uint8_t *k_buf_src = NULL;
  936. uint8_t *k_align_src = NULL;
  937. if (!handle->sha_ctxt.init_done) {
  938. pr_err("%s Init was not called\n", __func__);
  939. return -EINVAL;
  940. }
  941. handle->sha_ctxt.last_blk = 1;
  942. total = handle->sha_ctxt.trailing_buf_len;
  943. k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
  944. GFP_KERNEL);
  945. if (k_buf_src == NULL)
  946. return -ENOMEM;
  947. k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
  948. CACHE_LINE_SIZE);
  949. memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
  950. qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
  951. sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total);
  952. qcedev_areq->sha_req.sreq.nbytes = total;
  953. err = submit_req(qcedev_areq, handle);
  954. handle->sha_ctxt.first_blk = 0;
  955. handle->sha_ctxt.last_blk = 0;
  956. handle->sha_ctxt.auth_data[0] = 0;
  957. handle->sha_ctxt.auth_data[1] = 0;
  958. handle->sha_ctxt.trailing_buf_len = 0;
  959. handle->sha_ctxt.init_done = false;
  960. memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
  961. memset(k_buf_src, 0, ksize((void *)k_buf_src));
  962. kfree(k_buf_src);
  963. qcedev_areq->sha_req.sreq.src = NULL;
  964. return err;
  965. }
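/*
 * AES-CMAC: copy the user key and all source fragments into one contiguous
 * kernel buffer and hash it in a single request. Only 128- and 256-bit keys
 * are accepted.
 */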
  966. static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
  967. struct qcedev_handle *handle,
  968. struct scatterlist *sg_src)
  969. {
  970. int err = 0;
  971. int i = 0;
  972. uint32_t total;
  973. uint8_t *user_src = NULL;
  974. uint8_t *k_src = NULL;
  975. uint8_t *k_buf_src = NULL;
  976. total = qcedev_areq->sha_op_req.data_len;
  977. if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) &&
  978. (qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) {
  979. pr_err("%s: unsupported key length\n", __func__);
  980. return -EINVAL;
  981. }
  982. if (copy_from_user(&handle->sha_ctxt.authkey[0],
  983. (void __user *)qcedev_areq->sha_op_req.authkey,
  984. qcedev_areq->sha_op_req.authklen))
  985. return -EFAULT;
  986. if (total > U32_MAX - CACHE_LINE_SIZE * 2)
  987. return -EINVAL;
  988. k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, GFP_KERNEL);
  989. if (k_buf_src == NULL)
  990. return -ENOMEM;
  991. k_src = k_buf_src;
  992. /* Copy data from user src(s) */
  993. user_src = qcedev_areq->sha_op_req.data[0].vaddr;
  994. for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
  995. user_src = qcedev_areq->sha_op_req.data[i].vaddr;
  996. if (user_src && copy_from_user(k_src, (void __user *)user_src,
  997. qcedev_areq->sha_op_req.data[i].len)) {
  998. memset(k_buf_src, 0, ksize((void *)k_buf_src));
  999. kfree(k_buf_src);
  1000. return -EFAULT;
  1001. }
  1002. k_src += qcedev_areq->sha_op_req.data[i].len;
  1003. }
  1004. qcedev_areq->sha_req.sreq.src = sg_src;
  1005. sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
  1006. qcedev_areq->sha_req.sreq.nbytes = total;
  1007. handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
  1008. err = submit_req(qcedev_areq, handle);
  1009. memset(k_buf_src, 0, ksize((void *)k_buf_src));
  1010. kfree(k_buf_src);
  1011. return err;
  1012. }
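/*
 * HMAC key setup: keys up to QCEDEV_MAX_KEY_SIZE are copied as-is; longer
 * keys are first hashed with the underlying SHA algorithm (as standard HMAC
 * requires for long keys) and the digest is used as the key.
 */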
  1013. static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
  1014. struct qcedev_handle *handle,
  1015. struct scatterlist *sg_src)
  1016. {
  1017. int err = 0;
  1018. if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
  1019. qcedev_sha_init(areq, handle);
  1020. if (copy_from_user(&handle->sha_ctxt.authkey[0],
  1021. (void __user *)areq->sha_op_req.authkey,
  1022. areq->sha_op_req.authklen))
  1023. return -EFAULT;
  1024. } else {
  1025. struct qcedev_async_req authkey_areq;
  1026. uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
  1027. init_completion(&authkey_areq.complete);
  1028. authkey_areq.sha_op_req.entries = 1;
  1029. authkey_areq.sha_op_req.data[0].vaddr =
  1030. areq->sha_op_req.authkey;
  1031. authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
  1032. authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
  1033. authkey_areq.sha_op_req.diglen = 0;
  1034. authkey_areq.handle = handle;
  1035. memset(&authkey_areq.sha_op_req.digest[0], 0,
  1036. QCEDEV_MAX_SHA_DIGEST);
  1037. if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
  1038. authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
  1039. if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
  1040. authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
  1041. authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
  1042. qcedev_sha_init(&authkey_areq, handle);
  1043. err = qcedev_sha_update(&authkey_areq, handle, sg_src);
  1044. if (!err)
  1045. err = qcedev_sha_final(&authkey_areq, handle);
  1046. else
  1047. return err;
  1048. memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
  1049. handle->sha_ctxt.diglen);
  1050. qcedev_sha_init(areq, handle);
  1051. memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
  1052. handle->sha_ctxt.diglen);
  1053. }
  1054. return err;
  1055. }
  1056. static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
  1057. struct qcedev_handle *handle)
  1058. {
  1059. int err = 0;
  1060. struct scatterlist sg_src;
  1061. uint8_t *k_src = NULL;
  1062. uint32_t sha_block_size = 0;
  1063. uint32_t sha_digest_size = 0;
  1064. if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
  1065. sha_digest_size = SHA1_DIGEST_SIZE;
  1066. sha_block_size = SHA1_BLOCK_SIZE;
  1067. } else {
  1068. if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
  1069. sha_digest_size = SHA256_DIGEST_SIZE;
  1070. sha_block_size = SHA256_BLOCK_SIZE;
  1071. }
  1072. }
  1073. k_src = kmalloc(sha_block_size, GFP_KERNEL);
  1074. if (k_src == NULL)
  1075. return -ENOMEM;
  1076. /* check for trailing buffer from previous updates and append it */
  1077. memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
  1078. handle->sha_ctxt.trailing_buf_len);
  1079. qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
  1080. sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
  1081. qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
  1082. memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
  1083. memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
  1084. sha_digest_size);
  1085. handle->sha_ctxt.trailing_buf_len = sha_digest_size;
  1086. handle->sha_ctxt.first_blk = 1;
  1087. handle->sha_ctxt.last_blk = 0;
  1088. handle->sha_ctxt.auth_data[0] = 0;
  1089. handle->sha_ctxt.auth_data[1] = 0;
  1090. if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
  1091. memcpy(&handle->sha_ctxt.digest[0],
  1092. &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
  1093. handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
  1094. }
  1095. if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
  1096. memcpy(&handle->sha_ctxt.digest[0],
  1097. &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
  1098. handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
  1099. }
  1100. err = submit_req(qcedev_areq, handle);
  1101. handle->sha_ctxt.last_blk = 0;
  1102. handle->sha_ctxt.first_blk = 0;
  1103. memset(k_src, 0, ksize((void *)k_src));
  1104. kfree(k_src);
  1105. qcedev_areq->sha_req.sreq.src = NULL;
  1106. return err;
  1107. }
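/*
 * Software HMAC fallback: XOR the stored key with the ipad (0x36) or opad
 * (0x5c) constant and stage the result in the trailing buffer as the first
 * block of the inner or outer hash.
 */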
  1108. static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
  1109. struct qcedev_handle *handle, bool ikey)
  1110. {
  1111. int i;
  1112. uint32_t constant;
  1113. uint32_t sha_block_size;
  1114. if (ikey)
  1115. constant = 0x36;
  1116. else
  1117. constant = 0x5c;
  1118. if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
  1119. sha_block_size = SHA1_BLOCK_SIZE;
  1120. else
  1121. sha_block_size = SHA256_BLOCK_SIZE;
  1122. memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
  1123. for (i = 0; i < sha_block_size; i++)
  1124. handle->sha_ctxt.trailing_buf[i] =
  1125. (handle->sha_ctxt.authkey[i] ^ constant);
  1126. handle->sha_ctxt.trailing_buf_len = sha_block_size;
  1127. return 0;
  1128. }
  1129. static int qcedev_hmac_init(struct qcedev_async_req *areq,
  1130. struct qcedev_handle *handle,
  1131. struct scatterlist *sg_src)
  1132. {
  1133. int err;
  1134. struct qcedev_control *podev = handle->cntl;
  1135. err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
  1136. if (err)
  1137. return err;
  1138. if (!podev->ce_support.sha_hmac)
  1139. qcedev_hmac_update_iokey(areq, handle, true);
  1140. return 0;
  1141. }
  1142. static int qcedev_hmac_final(struct qcedev_async_req *areq,
  1143. struct qcedev_handle *handle)
  1144. {
  1145. int err;
  1146. struct qcedev_control *podev = handle->cntl;
  1147. err = qcedev_sha_final(areq, handle);
  1148. if (podev->ce_support.sha_hmac)
  1149. return err;
  1150. qcedev_hmac_update_iokey(areq, handle, false);
  1151. err = qcedev_hmac_get_ohash(areq, handle);
  1152. if (err)
  1153. return err;
  1154. err = qcedev_sha_final(areq, handle);
  1155. return err;
  1156. }
  1157. static int qcedev_hash_init(struct qcedev_async_req *areq,
  1158. struct qcedev_handle *handle,
  1159. struct scatterlist *sg_src)
  1160. {
  1161. if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
  1162. (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
  1163. return qcedev_sha_init(areq, handle);
  1164. else
  1165. return qcedev_hmac_init(areq, handle, sg_src);
  1166. }
  1167. static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
  1168. struct qcedev_handle *handle,
  1169. struct scatterlist *sg_src)
  1170. {
  1171. return qcedev_sha_update(qcedev_areq, handle, sg_src);
  1172. }
  1173. static int qcedev_hash_final(struct qcedev_async_req *areq,
  1174. struct qcedev_handle *handle)
  1175. {
  1176. if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
  1177. (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
  1178. return qcedev_sha_final(areq, handle);
  1179. else
  1180. return qcedev_hmac_final(areq, handle);
  1181. }
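/*
 * One in-place cipher pass over user virtual buffers: gather the source
 * fragments into the aligned kernel buffer, run the request on a single
 * scatterlist entry, then scatter the result back to the destination
 * fragments, tracking the destination index across calls via *di.
 */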
  1182. static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
  1183. int *di, struct qcedev_handle *handle,
  1184. uint8_t *k_align_src)
  1185. {
  1186. int err = 0;
  1187. int i = 0;
  1188. int dst_i = *di;
  1189. struct scatterlist sg_src;
  1190. uint32_t byteoffset = 0;
  1191. uint8_t *user_src = NULL;
  1192. uint8_t *k_align_dst = k_align_src;
  1193. struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
  1194. if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
  1195. byteoffset = areq->cipher_op_req.byteoffset;
  1196. user_src = areq->cipher_op_req.vbuf.src[0].vaddr;
  1197. if (user_src && copy_from_user((k_align_src + byteoffset),
  1198. (void __user *)user_src,
  1199. areq->cipher_op_req.vbuf.src[0].len))
  1200. return -EFAULT;
  1201. k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
  1202. for (i = 1; i < areq->cipher_op_req.entries; i++) {
  1203. user_src = areq->cipher_op_req.vbuf.src[i].vaddr;
  1204. if (user_src && copy_from_user(k_align_src,
  1205. (void __user *)user_src,
  1206. areq->cipher_op_req.vbuf.src[i].len)) {
  1207. return -EFAULT;
  1208. }
  1209. k_align_src += areq->cipher_op_req.vbuf.src[i].len;
  1210. }
  1211. /* restore src beginning */
  1212. k_align_src = k_align_dst;
  1213. areq->cipher_op_req.data_len += byteoffset;
  1214. areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
  1215. areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
  1216. /* In place encryption/decryption */
  1217. sg_init_one(areq->cipher_req.creq.src,
  1218. k_align_dst,
  1219. areq->cipher_op_req.data_len);
  1220. areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len;
  1221. areq->cipher_req.creq.iv = areq->cipher_op_req.iv;
  1222. areq->cipher_op_req.entries = 1;
  1223. err = submit_req(areq, handle);
  1224. /* copy data to destination buffer*/
  1225. creq->data_len -= byteoffset;
  1226. while (creq->data_len > 0) {
  1227. if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
  1228. if (err == 0 && copy_to_user(
  1229. (void __user *)creq->vbuf.dst[dst_i].vaddr,
  1230. (k_align_dst + byteoffset),
  1231. creq->vbuf.dst[dst_i].len)) {
  1232. err = -EFAULT;
  1233. goto exit;
  1234. }
  1235. k_align_dst += creq->vbuf.dst[dst_i].len;
  1236. creq->data_len -= creq->vbuf.dst[dst_i].len;
  1237. dst_i++;
  1238. } else {
  1239. if (err == 0 && copy_to_user(
  1240. (void __user *)creq->vbuf.dst[dst_i].vaddr,
  1241. (k_align_dst + byteoffset),
  1242. creq->data_len)) {
  1243. err = -EFAULT;
  1244. goto exit;
  1245. }
  1246. k_align_dst += creq->data_len;
  1247. creq->vbuf.dst[dst_i].len -= creq->data_len;
  1248. creq->vbuf.dst[dst_i].vaddr += creq->data_len;
  1249. creq->data_len = 0;
  1250. }
  1251. }
  1252. *di = dst_i;
  1253. exit:
  1254. areq->cipher_req.creq.src = NULL;
  1255. areq->cipher_req.creq.dst = NULL;
  1256. return err;
  1257. };
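/*
 * Cipher a user virtual-buffer request, splitting anything larger than
 * QCE_MAX_OPER_DATA across multiple passes of
 * qcedev_vbuf_ablk_cipher_max_xfer and restoring the caller's op_req
 * (lengths, addresses, byteoffset) before returning.
 */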
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
					CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmemdup(creq, sizeof(struct qcedev_cipher_op_req),
				GFP_KERNEL);
	if (saved_req == NULL) {
		memset(k_buf_src, 0, ksize((void *)k_buf_src));
		kfree(k_buf_src);
		return -ENOMEM;
	}

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0,
						ksize((void *)saved_req));
					memset(k_buf_src, 0,
						ksize((void *)k_buf_src));
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;
			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0,
						ksize((void *)saved_req));
					memset(k_buf_src, 0,
						ksize((void *)k_buf_src));
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
							k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	memset(saved_req, 0, ksize((void *)saved_req));
	memset(k_buf_src, 0, ksize((void *)k_buf_src));
	kfree(saved_req);
	kfree(k_buf_src);
	return err;
}
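
/*
 * Cipher path for offload requests whose buffers are already mapped for the
 * crypto engine: the pre-mapped addresses go directly into the scatterlist
 * dma_address fields and each vbuf entry is submitted in chunks of at most
 * MAX_CEHW_REQ_TRANSFER_SIZE.
 */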
static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	size_t byteoffset = 0;
	size_t transfer_data_len = 0;
	size_t pending_data_len = 0;
	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
	uint8_t *user_src = NULL;
	uint8_t *user_dst = NULL;
	struct scatterlist sg_src;
	struct scatterlist sg_dst;

	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->offload_cipher_op_req.byteoffset;

	/*
	 * areq has two components:
	 * a) Request that comes from userspace i.e. offload_cipher_op_req
	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
	 * skcipher has sglist pointers src and dest that would carry
	 * data to/from CE.
	 */
	areq->cipher_req.creq.src = &sg_src;
	areq->cipher_req.creq.dst = &sg_dst;
	sg_init_table(&sg_src, 1);
	sg_init_table(&sg_dst, 1);

	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
		transfer_data_len = 0;
		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
		user_src += byteoffset;

		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
		user_dst += byteoffset;

		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;

		while (pending_data_len) {
			transfer_data_len = min(max_data_xfer,
						pending_data_len);
			sg_src.dma_address = (dma_addr_t)user_src;
			sg_dst.dma_address = (dma_addr_t)user_dst;
			areq->cipher_req.creq.cryptlen = transfer_data_len;

			sg_src.length = transfer_data_len;
			sg_dst.length = transfer_data_len;

			err = submit_req(areq, handle);
			if (err) {
				pr_err("%s: Error processing req, err = %d\n",
						__func__, err);
				goto exit;
			}
			/* update data len to be processed */
			pending_data_len -= transfer_data_len;
			user_src += transfer_data_len;
			user_dst += transfer_data_len;
		}
	}
exit:
	return err;
}
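
/*
 * Validate the key portion of a cipher request: a zero-length key is only
 * accepted for the *_NO_KEY operations or when the target supports HW keys;
 * otherwise the key must be a supported AES size (doubled for XTS).
 */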
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}
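
/*
 * Validate a user-supplied cipher request: entry count and data length,
 * algorithm/mode, byteoffset (CTR mode only), IV and key lengths, and that
 * the src/dst buffer lengths sum to data_len without integer overflow.
 */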
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
		(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
		(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
			(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
			(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n", __func__);
			goto error;
		}
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
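
/*
 * Validate a user-supplied hash/HMAC/CMAC request: algorithm support, entry
 * count, auth key requirements, and that the source lengths sum to data_len
 * without integer overflow.
 */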
static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}
	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
				(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
				__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}
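
/* Offload requests must carry an explicit AES-128 or AES-256 key. */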
static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
						struct qcedev_control *podev)
{
	if (req->encklen == 0)
		return -EINVAL;

	/* AES-192 is not a valid option for OFFLOAD use case */
	if ((req->encklen != QCEDEV_AES_KEY_128) &&
			(req->encklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key size %d\n",
					__func__, req->encklen);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
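
/*
 * Validate an offload cipher request: AES only (mode up to CTR), a supported
 * key, sane block/byte offsets, IV and key lengths, and overflow-checked
 * src/dst totals that match data_len.
 */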
static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	int i = 0;

	if ((req->entries == 0) || (req->data_len == 0) ||
		(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg != QCEDEV_ALG_AES) ||
		(req->mode > QCEDEV_AES_MODE_CTR)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if (qcedev_check_offload_cipher_key(req, podev))
		goto error;

	if (req->block_offset >= AES_CE_BLOCK_SIZE)
		goto error;

	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Int overflow on total src len\n",
					__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
						req->encklen);
		goto error;
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Int overflow on total req dst vbuf len\n",
				__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Int overflow on total req src vbuf len\n",
				__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
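
/*
 * Main ioctl entry point: copy the request from user space, validate it,
 * run the requested cipher/hash operation, and copy the results back.
 *
 * A minimal sketch of a user-space caller (the device path and field setup
 * below are illustrative assumptions, not part of this driver):
 *
 *	struct qcedev_cipher_op_req req = { 0 };
 *	int fd = open("/dev/qce", O_RDWR);
 *
 *	... fill req.vbuf, req.entries, req.data_len, key and IV fields ...
 *	if (fd >= 0 && ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0)
 *		perror("QCEDEV_IOCTL_ENC_REQ");
 */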
long qcedev_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;
	struct qcedev_stat *pstat;

	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
	if (!qcedev_areq)
		return -ENOMEM;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq->handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
		err = -ENOENT;
		goto exit_free_qcedev_areq;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

	init_completion(&qcedev_areq->complete);
	pstat = &_qcedev_stat;

	switch (cmd) {
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (copy_from_user(&qcedev_areq->cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
				podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;
		if (copy_to_user((void __user *)arg,
				&qcedev_areq->cipher_op_req,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
		if (qcedev_check_offload_cipher_params(
				&qcedev_areq->offload_cipher_op_req, podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;

		if (copy_to_user((void __user *)arg,
				&qcedev_areq->offload_cipher_op_req,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = true;
		}
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac) {
			err = -ENOTTY;
			goto exit_free_qcedev_areq;
		}
		/* Fall-through */
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		} else {
			if (!handle->sha_ctxt.init_done) {
				pr_err("%s Init was not called\n", __func__);
				mutex_unlock(&hash_access_lock);
				err = -EINVAL;
				goto exit_free_qcedev_areq;
			}
			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		}

		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;
	case QCEDEV_IOCTL_SHA_FINAL_REQ:
		if (!handle->sha_ctxt.init_done) {
			pr_err("%s Init was not called\n", __func__);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = false;
		break;
	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;
	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				(void __user *)arg, sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
					map_buf.fd[i],
					map_buf.fd_offset[i],
					map_buf.fd_size[i],
					&vaddr);
			if (err) {
				pr_err(
					"%s: err: failed to map fd(%d) - %d\n",
					__func__, map_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx, fd = %d\n",
				__func__, vaddr, map_buf.fd[i]);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
		}

	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				(void __user *)arg, sizeof(unmap_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
					unmap_buf.fd[i]);
			if (err) {
				pr_err(
					"%s: err: failed to unmap fd(%d) - %d\n",
					__func__,
					unmap_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
		}
		break;
		}

	default:
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

exit_free_qcedev_areq:
	kfree(qcedev_areq);
	return err;
}
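
/*
 * Probe the main qcedev node: create the character device, vote for
 * interconnect bandwidth, open the QCE hardware, and populate child
 * context-bank devices from the device tree.
 */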
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QCEDEV_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL,
			QCEDEV_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&podev->cdev, &qcedev_fops);
	podev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	podev->minor = 0;

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->icc_path = of_icc_get(&pdev->dev, "data_path");
	if (IS_ERR(podev->icc_path)) {
		rc = PTR_ERR(podev->icc_path);
		pr_err("%s Failed to get icc path with error %d\n",
			__func__, rc);
		goto exit_del_cdev;
	}

	rc = icc_set_bw(podev->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
	if (rc) {
		pr_err("%s Unable to set high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}

	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}
	rc = icc_set_bw(podev->icc_path, 0, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
						podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto exit_mem_new_client;
	}

	return 0;

exit_mem_new_client:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;
exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	icc_set_bw(podev->icc_path, 0, 0);
exit_unregister_bus_scale:
	if (podev->icc_path)
		icc_put(podev->icc_path);
exit_del_cdev:
	cdev_del(&podev->cdev);
exit_destroy_device:
	device_destroy(driver_class, qcedev_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qcedev_device_no, 1);

	podev->icc_path = NULL;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;

	return rc;
}
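
/* Dispatch probe by compatible string: main device vs. SMMU context bank. */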
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
			"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}
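
/* Undo qcedev_probe_device(): close QCE, release the icc path and chardev. */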
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	if (podev->qce)
		qce_close(podev->qce);
	if (podev->icc_path)
		icc_put(podev->icc_path);

	tasklet_kill(&podev->done_tasklet);
	cdev_del(&podev->cdev);
	device_destroy(driver_class, qcedev_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(qcedev_device_no, 1);
	return 0;
}
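
/*
 * Suspend/resume simply drop or restore clocks and bandwidth while
 * high-bandwidth requests are outstanding.
 */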
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}
static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.of_match_table = qcedev_match,
	},
};
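
/* debugfs statistics support */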
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
			id + 1);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation success : %d\n",
			pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Encryption operation fail : %d\n",
			pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation success : %d\n",
			pstat->qcedev_dec_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" Decryption operation fail : %d\n",
			pstat->qcedev_dec_fail);

	return len;
}

static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
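
/* Create the debugfs directory "qcedev" and its "stats-1" file. */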
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
			&_debug_qcedev, &_debug_stats_ops);
	if (dent == NULL) {
		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
				PTR_ERR(dent));
		rc = PTR_ERR(dent);
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

static int qcedev_init(void)
{
	_qcedev_debug_init();
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");

module_init(qcedev_init);
module_exit(qcedev_exit);