// SPDX-License-Identifier: GPL-2.0-only
/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include "linux/qcedev.h"
#include <linux/interconnect.h>
#include <linux/delay.h>
#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"
#include "qcom_crypto_device.h"

#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)

/*
 * Max wait time once a crypto request is submitted.
 */
#define MAX_CRYPTO_WAIT_TIME 1500

/*
 * Max wait time once an offload crypto request is submitted.
 * This is low due to expected timeout and key pause errors.
 * This is temporary, and we can use the 1500 value once the
 * core irqs are enabled.
 */
#define MAX_OFFLOAD_CRYPTO_WAIT_TIME 20

#define MAX_REQUEST_TIME 5000
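
/*
 * Lifecycle of an async request as driven by submit_req() below: a
 * request is WAITING while parked on ready_commands, CURRENT once
 * req_done() has picked it to run next, SUBMITTED after it has been
 * handed to the QCE hardware, and DONE once the completion callback
 * has run.
 */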
enum qcedev_req_status {
	QCEDEV_REQ_CURRENT = 0,
	QCEDEV_REQ_WAITING = 1,
	QCEDEV_REQ_SUBMITTED = 2,
	QCEDEV_REQ_DONE = 3,
};
/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};
#define QCEDEV_CTX_KEY_MASK 0x000000ff
#define QCEDEV_CTX_USE_HW_KEY 0x00000001
#define QCEDEV_CTX_USE_PIPE_KEY 0x00000002

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static dev_t qcedev_device_no;
static struct class *driver_class;
static struct device *class_dev;

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);
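
/*
 * Enable or disable the CE core clock and the interconnect bandwidth
 * vote as a pair. The order of the two steps depends on
 * ce_support.req_bw_before_clk: on enable, either the bandwidth is voted
 * before the clock is turned on (QCE_BW_REQUEST_FIRST) or the clock
 * comes up first (QCE_CLK_ENABLE_FIRST); on disable the order is
 * mirrored. If the second step fails, the first is rolled back so clock
 * and bandwidth state stay consistent.
 */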
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path,
				podev->icc_avg_bw, podev->icc_peak_bw);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = icc_set_bw(podev->icc_path,
				podev->icc_avg_bw, podev->icc_peak_bw);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path, 0, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path,
					podev->icc_avg_bw, podev->icc_peak_bw);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
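
/*
 * Refcounted high-bandwidth vote, serialized by qcedev_sent_bw_req.
 * The first caller requesting high bandwidth turns on the clocks, votes
 * the interconnect and enables the BAM irqs; the last caller releasing
 * it reverses those steps.
 */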
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
				bool high_bw_req)
{
	int ret = 0;

	if (podev == NULL)
		return;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
			ret = qce_set_irqs(podev->qce, true);
			if (ret) {
				pr_err("%s: could not enable bam irqs, ret = %d\n",
						__func__, ret);
				qcedev_control_clocks(podev, false);
				goto exit_unlock_mutex;
			}
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qce_set_irqs(podev->qce, false);
			if (ret) {
				pr_err("%s: could not disable bam irqs, ret = %d\n",
						__func__, ret);
				goto exit_unlock_mutex;
			}
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}

exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}
#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev,
				int *current_req_info);
static int start_offload_cipher_req(struct qcedev_control *podev,
				int *current_req_info);
static int start_sha_req(struct qcedev_control *podev,
				int *current_req_info);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024
struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;

static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].minor == n)
			return &qce_dev[i];
	}
	return NULL;
}
static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
				MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;

	qcedev_ce_high_bw_req(podev, true);

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}
static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
					__func__, podev);
	}

	if (podev)
		qcedev_ce_high_bw_req(podev, false);

	if (qcedev_unmap_all_buffers(handle))
		pr_err("%s: failed to unmap all ion buffers\n", __func__);

	kfree_sensitive(handle);
	file->private_data = NULL;
	return 0;
}
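
/*
 * Completion half of a request. Runs in tasklet context (done_tasklet is
 * assumed to be bound to this handler elsewhere in the driver, outside
 * this excerpt): it completes the active command, unless that command
 * already timed out, and wakes the next request parked on ready_commands.
 */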
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;
	if (areq) {
		if (!areq->timed_out)
			complete(&areq->complete);
		areq->state = QCEDEV_REQ_DONE;
	}

	/* Look through queued requests and wake up the corresponding thread */
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		new_req->state = QCEDEV_REQ_CURRENT;
		wake_up_interruptible(&new_req->wait_q);
	}

	spin_unlock_irqrestore(&podev->lock, flags);
}
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
			unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;
	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;
	if (!pdev)
		return;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}
void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
			unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	if (!podev)
		return;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}
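
/*
 * Translate the userspace qcedev_cipher_op_req held in the active command
 * into a qce_req and hand it to the QCE core. Note that AES-CTR is always
 * submitted as an encrypt operation: in CTR mode encryption and
 * decryption are the same keystream XOR. An all-zero key is accepted only
 * when the platform advertises hardware-key support.
 */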
static int start_cipher_req(struct qcedev_control *podev,
				int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	memset(&creq, 0, sizeof(creq));

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
	creq.iv_ctr_size = 0;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
					(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	creq.offload_op = QCE_OFFLOAD_NONE;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
	*current_req_info = creq.current_req_info;
unsupported:
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
			unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	if (!podev)
		return;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
			qcedev_areq->offload_cipher_op_req.ivlen);

	tasklet_schedule(&podev->done_tasklet);
}
static int start_offload_cipher_req(struct qcedev_control *podev,
				int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	u8 patt_sz = 0, proc_data_sz = 0;
	int ret = 0;

	memset(&creq, 0, sizeof(creq));

	/* Start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;

	switch (qcedev_areq->offload_cipher_op_req.alg) {
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->offload_cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	default:
		return -EINVAL;
	}

	if (qcedev_areq->offload_cipher_op_req.is_copy_op ||
			qcedev_areq->offload_cipher_op_req.encrypt) {
		creq.dir = QCE_ENCRYPT;
	} else {
		switch (qcedev_areq->offload_cipher_op_req.op) {
		case QCEDEV_OFFLOAD_HLOS_HLOS:
		case QCEDEV_OFFLOAD_HLOS_HLOS_1:
		case QCEDEV_OFFLOAD_HLOS_CPB:
		case QCEDEV_OFFLOAD_HLOS_CPB_1:
			creq.dir = QCE_DECRYPT;
			break;
		case QCEDEV_OFFLOAD_CPB_HLOS:
			creq.dir = QCE_ENCRYPT;
			break;
		default:
			return -EINVAL;
		}
	}

	creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
	creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
	creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;

	/* OFFLOAD use cases use PIPE keys so no need to set keys */
	creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
	creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
	creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
	if (qcedev_areq->offload_cipher_op_req.is_copy_op)
		creq.is_copy_op = true;

	creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;

	creq.qce_cb = qcedev_offload_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
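
	/*
	 * Pack the pattern descriptor into one word. From the shifts below,
	 * the apparent layout is: bit 0 enables pattern mode, bits [7:4]
	 * carry (pattern size - 1), bits [11:8] carry (processed data
	 * size - 1), and the pattern offset starts at bit 12. The exact
	 * field widths are defined by the QCE hardware interface, not by
	 * this file.
	 */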
	patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
	proc_data_sz =
		qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
	creq.is_pattern_valid =
		qcedev_areq->offload_cipher_op_req.is_pattern_valid;
	if (creq.is_pattern_valid) {
		creq.pattern_info = 0x1;
		if (patt_sz)
			creq.pattern_info |= (patt_sz - 1) << 4;
		if (proc_data_sz)
			creq.pattern_info |= (proc_data_sz - 1) << 8;
		creq.pattern_info |=
			qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
	}
	creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;

	ret = qce_ablk_cipher_req(podev->qce, &creq);

	*current_req_info = creq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;

	return ret;
}
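
/*
 * Build a qce_sha_req from the active command. For the plain and HMAC
 * hash algorithms the intermediate digest and byte counters saved in
 * handle->sha_ctxt are handed back to the hardware, which is what lets
 * a hash be computed across multiple update calls.
 */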
static int start_sha_req(struct qcedev_control *podev,
				int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);

	*current_req_info = sreq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
static void qcedev_check_crypto_status(
			struct qcedev_async_req *qcedev_areq, void *handle)
{
	struct qce_error error = {0};

	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
	qce_get_crypto_status(handle, &error);

	if (error.timer_error) {
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR;
	} else if (error.key_paused) {
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_PAUSE_ERROR;
	} else if (error.generic_error) {
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_GENERIC_ERROR;
	}
}
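
/*
 * Serialize and run one async request. Only one command is active on the
 * engine at a time; callers that find the engine busy park themselves on
 * ready_commands and sleep until req_done() promotes them. After a
 * successful start the caller waits, with a timeout, for the completion
 * callback. On timeout the crypto status registers are checked first: if
 * the engine reports no error, the request is polled to completion (up
 * to MAX_RETRIES sleeps) instead of being torn down; otherwise
 * qce_manage_timeout() performs the housekeeping the missed callback
 * would have done.
 */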
#define MAX_RETRIES 333

static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;
	int current_req_info = 0;
	int wait = MAX_CRYPTO_WAIT_TIME;
	struct qcedev_async_req *new_req = NULL;
	int retries = 0;
	int req_wait = MAX_REQUEST_TIME;
	unsigned int crypto_wait = 0;

	qcedev_areq->err = 0;
	podev = handle->cntl;
	init_waitqueue_head(&qcedev_areq->wait_q);

	spin_lock_irqsave(&podev->lock, flags);

	/*
	 * Service only one crypto request at a time.
	 * Any other new requests are queued in ready_commands and woken up
	 * only when the active command has finished successfully or when the
	 * request times out or when the command failed when setting up.
	 */
	do {
		if (podev->active_command == NULL) {
			podev->active_command = qcedev_areq;
			qcedev_areq->state = QCEDEV_REQ_SUBMITTED;
			switch (qcedev_areq->op_type) {
			case QCEDEV_CRYPTO_OPER_CIPHER:
				ret = start_cipher_req(podev,
						&current_req_info);
				crypto_wait = MAX_CRYPTO_WAIT_TIME;
				break;
			case QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER:
				ret = start_offload_cipher_req(podev,
						&current_req_info);
				crypto_wait = MAX_OFFLOAD_CRYPTO_WAIT_TIME;
				break;
			default:
				crypto_wait = MAX_CRYPTO_WAIT_TIME;
				ret = start_sha_req(podev,
						&current_req_info);
				break;
			}
		} else {
			list_add_tail(&qcedev_areq->list,
					&podev->ready_commands);
			qcedev_areq->state = QCEDEV_REQ_WAITING;
			req_wait = wait_event_interruptible_lock_irq_timeout(
				qcedev_areq->wait_q,
				(qcedev_areq->state == QCEDEV_REQ_CURRENT),
				podev->lock,
				msecs_to_jiffies(MAX_REQUEST_TIME));
			if ((req_wait == 0) || (req_wait == -ERESTARTSYS)) {
				pr_err("%s: request timed out, req_wait = %d\n",
						__func__, req_wait);
				list_del(&qcedev_areq->list);
				podev->active_command = NULL;
				spin_unlock_irqrestore(&podev->lock, flags);
				return qcedev_areq->err;
			}
		}
	} while (qcedev_areq->state != QCEDEV_REQ_SUBMITTED);

	if (ret != 0) {
		podev->active_command = NULL;
		/*
		 * Look through queued requests and wake up the corresponding
		 * thread.
		 */
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
			list_del(&new_req->list);
			new_req->state = QCEDEV_REQ_CURRENT;
			wake_up_interruptible(&new_req->wait_q);
		}
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	qcedev_areq->timed_out = false;
	if (ret == 0)
		wait = wait_for_completion_timeout(&qcedev_areq->complete,
				msecs_to_jiffies(crypto_wait));

	if (!wait) {
		/*
		 * This means wait timed out, and the callback routine was not
		 * exercised. The callback sequence does some housekeeping which
		 * would be missed here, hence having a call to qce here to do
		 * that.
		 */
		pr_err("%s: wait timed out, req info = %d\n", __func__,
			current_req_info);
		spin_lock_irqsave(&podev->lock, flags);
		qcedev_areq->timed_out = true;
		spin_unlock_irqrestore(&podev->lock, flags);
		qcedev_check_crypto_status(qcedev_areq, podev->qce);
		if (qcedev_areq->offload_cipher_op_req.err ==
					QCEDEV_OFFLOAD_NO_ERROR) {
			pr_err("%s: no error, wait for request to be done\n", __func__);
			while (qcedev_areq->state != QCEDEV_REQ_DONE &&
					retries < MAX_RETRIES) {
				usleep_range(3000, 5000);
				retries++;
				pr_err("%s: waiting for req state to be done, retries = %d\n",
						__func__, retries);
			}
			return 0;
		}
		ret = qce_manage_timeout(podev->qce, current_req_info);
		if (ret)
			pr_err("%s: error during manage timeout\n", __func__);

		req_done((unsigned long) podev);
		if (qcedev_areq->offload_cipher_op_req.err !=
						QCEDEV_OFFLOAD_NO_ERROR)
			return 0;
	}

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
		/* Do nothing */
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}
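
/*
 * Reset the per-handle hash context and seed the digest with the
 * standard FIPS 180-2 initial hash values, so the hardware can resume a
 * partially computed SHA-1/SHA-256 across update calls.
 */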
static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
				(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	sha_ctxt->init_done = true;
	return 0;
}
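
/*
 * Hash one chunk of user data (at most QCE_MAX_OPER_DATA bytes). Bytes
 * left over from the previous update are prepended, whole hash blocks
 * are sent to the hardware, and the remainder (up to one full block) is
 * carried in sha_ctxt.trailing_buf for the next update or the final call.
 */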
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src = qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}

	buf_size = total + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		memset(k_buf_src, 0, buf_size);
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, buf_size);
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	memset(k_buf_src, 0, buf_size);
	kfree(k_buf_src);
	return err;
}
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
		uint32_t req_size = 0;

		req_size = sizeof(struct qcedev_sha_op_req);
		/* save the original req structure */
		saved_req = kmalloc(req_size, GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
						__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(*sreq));
		memcpy(saved_req, sreq, sizeof(*sreq));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		memset(saved_req, 0, req_size);
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;
	uint8_t *k_align_src = NULL;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	buf_size = total + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
	memset(k_buf_src, 0, buf_size);
	kfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
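
/*
 * AES-CMAC over the gathered user buffers. Unlike the SHA paths there is
 * no trailing-buffer state: the whole message is copied into one kernel
 * buffer and submitted as a single request. Only 128-bit and 256-bit
 * CMAC keys are accepted.
 */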
static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;

	total = qcedev_areq->sha_op_req.data_len;

	if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) &&
		(qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key length\n", __func__);
		return -EINVAL;
	}

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;

	if (total > U32_MAX - CACHE_LINE_SIZE * 2)
		return -EINVAL;

	buf_size = total + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, buf_size);
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	memset(k_buf_src, 0, buf_size);
	kfree(k_buf_src);
	return err;
}
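
/*
 * Load the HMAC key into the context. Mirroring the HMAC rule for long
 * keys (RFC 2104), a key longer than QCEDEV_MAX_KEY_SIZE is first hashed
 * with the underlying algorithm and the digest is used as the effective
 * key; shorter keys are copied in directly.
 */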
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);

		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;

		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}
static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;
	memset(k_src, 0, sha_block_size);
	kfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
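
/*
 * Prepare the HMAC inner or outer key block per RFC 2104: XOR the stored
 * auth key with the ipad byte 0x36 (ikey == true) or the opad byte 0x5c
 * (ikey == false) across one full hash block, and stage the result in
 * the trailing buffer so it is hashed first. Used only when the CE core
 * lacks native HMAC support.
 */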
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}
static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}
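
/*
 * Finish an HMAC. With native hardware HMAC support, the final hash
 * already is the MAC. Without it, compute the outer hash by hand: finish
 * the inner hash, stage (key XOR opad) followed by the inner digest via
 * qcedev_hmac_get_ohash(), then run one more final pass over that block.
 */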
static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}
static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}
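
/*
 * Run one cipher transfer over user virtual buffers. The source entries
 * are gathered into the caller-supplied bounce buffer (k_align_src), the
 * operation is performed in place on that buffer, and the result is
 * scattered back across the destination vbuf entries starting at *di,
 * which is advanced for the caller. For CTR mode, the payload is staged
 * byteoffset bytes into the buffer and the same offset is skipped when
 * copying results back, so the data keeps its counter-block alignment.
 */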
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
					    int *di,
					    struct qcedev_handle *handle,
					    uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src = areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_init_one(areq->cipher_req.creq.src,
		    k_align_dst,
		    areq->cipher_op_req.data_len);
	areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.iv = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}
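
/*
 * Top-level virtual-buffer cipher path. Requests larger than
 * QCE_MAX_OPER_DATA are split and fed to qcedev_vbuf_ablk_cipher_max_xfer()
 * one hardware-sized chunk at a time, fragmenting or coalescing source
 * entries as needed. The caller's request structure is saved up front
 * (kmemdup) and restored before returning, since the loop rewrites the
 * source/destination vectors in place.
 */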
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
				   struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	uint32_t req_size = 0;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	buf_size = QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
				       CACHE_LINE_SIZE);

	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	req_size = sizeof(struct qcedev_cipher_op_req);
	saved_req = kmemdup(creq, req_size, GFP_KERNEL);
	if (saved_req == NULL) {
		memset(k_buf_src, 0, buf_size);
		kfree(k_buf_src);
		return -ENOMEM;
	}

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0, req_size);
					memset(k_buf_src, 0, buf_size);
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;
			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0, req_size);
					memset(k_buf_src, 0, buf_size);
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
						       k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	memset(saved_req, 0, req_size);
	memset(k_buf_src, 0, buf_size);
	kfree(saved_req);
	kfree(k_buf_src);
	return err;
}
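
/*
 * Offload cipher path for pre-mapped buffers. The vbuf addresses here
 * are expected to already be device-usable (typically obtained through
 * QCEDEV_IOCTL_MAP_BUF_REQ), so no copy_from_user/copy_to_user is done:
 * each entry is walked in MAX_CEHW_REQ_TRANSFER_SIZE chunks using
 * single-entry scatterlists whose dma_address fields point straight at
 * the mapped buffers.
 */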
static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
					   struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	size_t byteoffset = 0;
	size_t transfer_data_len = 0;
	size_t pending_data_len = 0;
	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
	uint8_t *user_src = NULL;
	uint8_t *user_dst = NULL;
	struct scatterlist sg_src;
	struct scatterlist sg_dst;

	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->offload_cipher_op_req.byteoffset;

	/*
	 * areq has two components:
	 * a) Request that comes from userspace i.e. offload_cipher_op_req
	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
	 * skcipher has sglist pointers src and dest that would carry
	 * data to/from CE.
	 */
	areq->cipher_req.creq.src = &sg_src;
	areq->cipher_req.creq.dst = &sg_dst;
	sg_init_table(&sg_src, 1);
	sg_init_table(&sg_dst, 1);

	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
		transfer_data_len = 0;
		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
		user_src += byteoffset;

		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
		user_dst += byteoffset;

		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;

		while (pending_data_len) {
			transfer_data_len = min(max_data_xfer,
						pending_data_len);
			sg_src.dma_address = (dma_addr_t)user_src;
			sg_dst.dma_address = (dma_addr_t)user_dst;
			areq->cipher_req.creq.cryptlen = transfer_data_len;

			sg_src.length = transfer_data_len;
			sg_dst.length = transfer_data_len;

			err = submit_req(areq, handle);
			if (err) {
				pr_err("%s: Error processing req, err = %d\n",
				       __func__, err);
				goto exit;
			}
			/* update data len to be processed */
			pending_data_len -= transfer_data_len;
			user_src += transfer_data_len;
			user_dst += transfer_data_len;
		}
	}
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}
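
/*
 * Validate the key portion of a cipher request. encklen == 0 means the
 * caller intends a hardware-fused key: every byte of enckey must then
 * be zero, and unless the op is explicitly *_NO_KEY the target must
 * actually support HW keys. Otherwise the length must match a supported
 * AES key size: 192-bit only where ce_support.aes_key_192 is set, and
 * doubled 128/256-bit sizes for XTS.
 */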
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
				   struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
				       __func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
		    (req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
				       (uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
				    (req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
					       __func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
				    (req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
					       __func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}
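
/*
 * Sanity-check an entire cipher request before any data is touched:
 * algorithm/mode ranges, key validity, CTR-only byteoffset rules,
 * IV length (zero for ECB, non-zero otherwise), and overflow-checked
 * sums of the src and dst vectors, both of which must equal data_len.
 */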
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
				      struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
	    (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
	    (req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
		       (uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
	    (!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
			       __func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s: Integer overflow on total src len\n",
				       __func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
		       __func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
		    (req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
		    (req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n", __func__);
			goto error;
		}
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
			       __func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
			       __func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
		       __func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
			       __func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
			       __func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
		       __func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
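
/*
 * Validate a SHA/HMAC/CMAC request: supported algorithm, sane entry
 * count, a usable auth key for the HMAC and CMAC variants, and an
 * overflow-checked sum of the data vector that must equal data_len.
 */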
static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
				   struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
	    (!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
		       __func__, req->entries);
		goto sha_error;
	}
	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
	    (req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
			       __func__, req->authklen);
			goto sha_error;
		}
	}
	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
		    (req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}
	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
			       __func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
		       __func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}
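
/*
 * Offload requests must carry an explicit key: HW-key operation
 * (encklen == 0) is rejected outright, and only 128- and 256-bit AES
 * keys are accepted.
 */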
static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
					   struct qcedev_control *podev)
{
	if (req->encklen == 0)
		return -EINVAL;

	/* AES-192 is not a valid option for OFFLOAD use case */
	if ((req->encklen != QCEDEV_AES_KEY_128) &&
	    (req->encklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key size %d\n",
		       __func__, req->encklen);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
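
/*
 * Same shape as qcedev_check_cipher_params(), restricted to the offload
 * path: AES only, modes up to CTR, an explicit key, a bounded
 * block_offset, and overflow-checked src/dst sums equal to data_len.
 */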
static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
					      struct qcedev_control *podev)
{
	uint32_t total = 0;
	int i = 0;

	if ((req->entries == 0) || (req->data_len == 0) ||
	    (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg != QCEDEV_ALG_AES) ||
	    (req->mode > QCEDEV_AES_MODE_CTR)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
		       (uint32_t)req->alg);
		goto error;
	}
	if (qcedev_check_offload_cipher_key(req, podev))
		goto error;
	if (req->block_offset >= AES_CE_BLOCK_SIZE)
		goto error;

	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
			       __func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s: Int overflow on total src len\n",
				       __func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
		       __func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
		       req->encklen);
		goto error;
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
			       __func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Int overflow on total req dst vbuf len\n",
			       __func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
		       __func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
			       __func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Int overflow on total req src vbuf len\n",
			       __func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
		       __func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
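
/*
 * qcedev_ioctl() - single entry point for all qcedev requests.
 *
 * Each command copies its op-req structure in from userspace, validates
 * it, runs the operation, and copies the (possibly updated) structure
 * back. Hash state is serialized under hash_access_lock, and the async
 * req is heap-allocated so concurrent callers do not share state.
 *
 * Illustrative userspace sequence (a sketch only; the device path and
 * request fields are examples, not taken from this file):
 *
 *	int fd = open("/dev/qce", O_RDWR);
 *	struct qcedev_sha_op_req req = { .alg = QCEDEV_ALG_SHA256 };
 *	ioctl(fd, QCEDEV_IOCTL_SHA_INIT_REQ, &req);
 *	ioctl(fd, QCEDEV_IOCTL_SHA_UPDATE_REQ, &req);
 *	ioctl(fd, QCEDEV_IOCTL_SHA_FINAL_REQ, &req); // digest in req.digest
 */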
long qcedev_ioctl(struct file *file,
		  unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;
	struct qcedev_stat *pstat;

	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
	if (!qcedev_areq)
		return -ENOMEM;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq->handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
		       __func__, podev);
		err = -ENOENT;
		goto exit_free_qcedev_areq;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

	init_completion(&qcedev_areq->complete);
	pstat = &_qcedev_stat;

	switch (cmd) {
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (copy_from_user(&qcedev_areq->cipher_op_req,
				   (void __user *)arg,
				   sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
					       podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;
		if (copy_to_user((void __user *)arg,
				 &qcedev_areq->cipher_op_req,
				 sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
				   (void __user *)arg,
				   sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
		if (qcedev_check_offload_cipher_params(
				&qcedev_areq->offload_cipher_op_req, podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;

		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;

		if (copy_to_user((void __user *)arg,
				 &qcedev_areq->offload_cipher_op_req,
				 sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
				   (void __user *)arg,
				   sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				 sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = true;
		}
		break;

	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac) {
			err = -ENOTTY;
			goto exit_free_qcedev_areq;
		}
		fallthrough;
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
				   (void __user *)arg,
				   sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		} else {
			if (!handle->sha_ctxt.init_done) {
				pr_err("%s Init was not called\n", __func__);
				mutex_unlock(&hash_access_lock);
				err = -EINVAL;
				goto exit_free_qcedev_areq;
			}
			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		}

		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
			       handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		memcpy(&qcedev_areq->sha_op_req.digest[0],
		       &handle->sha_ctxt.digest[0],
		       handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				 sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:
		if (!handle->sha_ctxt.init_done) {
			pr_err("%s Init was not called\n", __func__);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		if (copy_from_user(&qcedev_areq->sha_op_req,
				   (void __user *)arg,
				   sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
			       handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
		       &handle->sha_ctxt.digest[0],
		       handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				 sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = false;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
				   (void __user *)arg,
				   sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
			       handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
		       &handle->sha_ctxt.digest[0],
		       handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
				 sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;
	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				   (void __user *)arg, sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) {
			pr_err("%s: err: num_fds = %d exceeds max value\n",
			       __func__, map_buf.num_fds);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
							  map_buf.fd[i],
							  map_buf.fd_offset[i],
							  map_buf.fd_size[i],
							  &vaddr);
			if (err) {
				pr_err("%s: err: failed to map fd(%d) - %d\n",
				       __func__, map_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx, fd = %d\n",
				__func__, vaddr, map_buf.fd[i]);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				 sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
		}
	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				   (void __user *)arg, sizeof(unmap_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) {
			pr_err("%s: err: num_fds = %d exceeds max value\n",
			       __func__, unmap_buf.num_fds);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
							    unmap_buf.fd[i]);
			if (err) {
				pr_err("%s: err: failed to unmap fd(%d) - %d\n",
				       __func__,
				       unmap_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
		}
		break;
		}

	default:
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

exit_free_qcedev_areq:
	kfree(qcedev_areq);
	return err;
}
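
/*
 * Bring up the qcedev character device: allocate the chrdev region,
 * create the class/device nodes, register the cdev, acquire and vote
 * the interconnect path (bandwidth from DT, nominal defaults otherwise),
 * open the QCE core, and register the ION memory client. Each step
 * unwinds through the error labels in reverse order on failure.
 */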
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QCEDEV_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL,
				  QCEDEV_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&podev->cdev, &qcedev_fops);
	podev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	podev->minor = 0;

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->icc_path = of_icc_get(&pdev->dev, "data_path");
	if (IS_ERR(podev->icc_path)) {
		rc = PTR_ERR(podev->icc_path);
		pr_err("%s Failed to get icc path with error %d\n",
		       __func__, rc);
		goto exit_del_cdev;
	}

	/*
	 * HLOS crypto vote values from DTSI. If no values specified, use
	 * nominal values.
	 */
	if (of_property_read_u32((&pdev->dev)->of_node,
				 "qcom,icc_avg_bw",
				 &podev->icc_avg_bw)) {
		pr_warn("%s: No icc avg BW set, using default\n", __func__);
		podev->icc_avg_bw = CRYPTO_AVG_BW;
	}

	if (of_property_read_u32((&pdev->dev)->of_node,
				 "qcom,icc_peak_bw",
				 &podev->icc_peak_bw)) {
		pr_warn("%s: No icc peak BW set, using default\n", __func__);
		podev->icc_peak_bw = CRYPTO_PEAK_BW;
	}

	rc = icc_set_bw(podev->icc_path, podev->icc_avg_bw,
			podev->icc_peak_bw);
	if (rc) {
		pr_err("%s Unable to set high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}

	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}
	podev->qce = handle;

	rc = qce_set_irqs(podev->qce, false);
	if (rc) {
		pr_err("%s: could not disable bam irqs, ret = %d",
		       __func__, rc);
		goto exit_scale_busbandwidth;
	}

	rc = icc_set_bw(podev->icc_path, 0, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
						podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
				  NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
		       __func__, rc);
		goto exit_mem_new_client;
	}

	return 0;

exit_mem_new_client:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;

exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	icc_set_bw(podev->icc_path, 0, 0);
exit_unregister_bus_scale:
	if (podev->icc_path)
		icc_put(podev->icc_path);
exit_del_cdev:
	cdev_del(&podev->cdev);
exit_destroy_device:
	device_destroy(driver_class, qcedev_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qcedev_device_no, 1);

	podev->icc_path = NULL;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;

	return rc;
}
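
/*
 * DT dispatch: "qcom,qcedev" nodes probe the device proper, while the
 * "qcom,qcedev,context-bank" child nodes (populated by
 * of_platform_populate() above) only parse an SMMU context bank.
 */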
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
					 "qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}
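
/*
 * Tear down in reverse of probe; the high-bandwidth vote is held
 * around qce_close() so the core stays clocked while it shuts down.
 */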
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	qcedev_ce_high_bw_req(podev, true);
	if (podev->qce)
		qce_close(podev->qce);
	qcedev_ce_high_bw_req(podev, false);
	if (podev->icc_path)
		icc_put(podev->icc_path);
	tasklet_kill(&podev->done_tasklet);

	cdev_del(&podev->cdev);
	device_destroy(driver_class, qcedev_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(qcedev_device_no, 1);
	return 0;
}
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qce_set_irqs(podev->qce, false);
		if (ret) {
			pr_err("%s: could not disable bam irqs, ret = %d",
			       __func__, ret);
			goto suspend_exit;
		}
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}

static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
		ret = qce_set_irqs(podev->qce, true);
		if (ret) {
			pr_err("%s: could not enable bam irqs, ret = %d",
			       __func__, ret);
			qcedev_control_clocks(podev, false);
		}
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}
static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.of_match_table = qcedev_match,
	},
};
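
/*
 * Format the per-device encrypt/decrypt success and failure counters
 * into _debug_read_buf for the debugfs stats file.
 */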
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
			id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation success : %d\n",
			pstat->qcedev_enc_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation fail    : %d\n",
			pstat->qcedev_enc_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation success : %d\n",
			pstat->qcedev_dec_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation fail    : %d\n",
			pstat->qcedev_dec_fail);

	return len;
}
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
					     ppos, (void *) _debug_read_buf,
					     len);
	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
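
/*
 * Create the debugfs hierarchy: a "qcedev" directory containing a
 * "stats-1" file backed by _debug_stats_ops (reading dumps the
 * counters, any write clears them).
 */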
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
			 PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME - 1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
				   &_debug_qcedev, &_debug_stats_ops);
	/* debugfs_create_file() reports failure via ERR_PTR, not NULL */
	if (IS_ERR(dent)) {
		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
			 PTR_ERR(dent));
		rc = PTR_ERR(dent);
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}
static int qcedev_init(void)
{
	_qcedev_debug_init();
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");
MODULE_IMPORT_NS(DMA_BUF);

module_init(qcedev_init);
module_exit(qcedev_exit);