qcedev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QTI CE device driver.
 *
 * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include "linux/platform_data/qcom_crypto_device.h"
#include "linux/qcedev.h"
#include <linux/interconnect.h>
#include <linux/delay.h>
#include <crypto/hash.h>
#include "qcedevi.h"
#include "qce.h"
#include "qcedev_smmu.h"
#include "compat_qcedev.h"
#include <linux/compat.h>

#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)

/*
 * Max wait time once a crypto request is done.
 * Assuming 5ms per crypto operation, this is calculated for
 * the scenario of having 3 offload reqs + 1 tz req + buffer.
 */
#define MAX_CRYPTO_WAIT_TIME 25

#define MAX_REQUEST_TIME 5000
enum qcedev_req_status {
	QCEDEV_REQ_CURRENT = 0,
	QCEDEV_REQ_WAITING = 1,
	QCEDEV_REQ_SUBMITTED = 2,
	QCEDEV_REQ_DONE = 3,
};

/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

#define QCEDEV_CTX_KEY_MASK	0x000000ff
#define QCEDEV_CTX_USE_HW_KEY	0x00000001
#define QCEDEV_CTX_USE_PIPE_KEY	0x00000002

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(qcedev_sent_bw_req);
static DEFINE_MUTEX(hash_access_lock);

static dev_t qcedev_device_no;
static struct class *driver_class;
static struct device *class_dev;

static const struct of_device_id qcedev_match[] = {
	{ .compatible = "qcom,qcedev"},
	{ .compatible = "qcom,qcedev,context-bank"},
	{}
};

MODULE_DEVICE_TABLE(of, qcedev_match);
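
/*
 * Toggle the CE clocks and the interconnect bandwidth vote together.
 * The order of the two operations depends on ce_support.req_bw_before_clk;
 * on failure, the step that already succeeded is rolled back.
 */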
static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
{
	unsigned int control_flag;
	int ret = 0;

	if (podev->ce_support.req_bw_before_clk) {
		if (enable)
			control_flag = QCE_BW_REQUEST_FIRST;
		else
			control_flag = QCE_CLK_DISABLE_FIRST;
	} else {
		if (enable)
			control_flag = QCE_CLK_ENABLE_FIRST;
		else
			control_flag = QCE_BW_REQUEST_RESET_FIRST;
	}

	switch (control_flag) {
	case QCE_CLK_ENABLE_FIRST:
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path,
				podev->icc_avg_bw, podev->icc_peak_bw);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			ret = qce_disable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_FIRST:
		ret = icc_set_bw(podev->icc_path,
				podev->icc_avg_bw, podev->icc_peak_bw);
		if (ret) {
			pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		ret = qce_enable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to enable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path, 0, 0);
			if (ret)
				pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		break;
	case QCE_CLK_DISABLE_FIRST:
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			return ret;
		}
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			ret = qce_enable_clk(podev->qce);
			if (ret)
				pr_err("%s Unable to enable clk\n", __func__);
			return ret;
		}
		break;
	case QCE_BW_REQUEST_RESET_FIRST:
		ret = icc_set_bw(podev->icc_path, 0, 0);
		if (ret) {
			pr_err("%s Unable to set low bw\n", __func__);
			return ret;
		}
		ret = qce_disable_clk(podev->qce);
		if (ret) {
			pr_err("%s Unable to disable clk\n", __func__);
			ret = icc_set_bw(podev->icc_path,
					podev->icc_avg_bw, podev->icc_peak_bw);
			if (ret)
				pr_err("%s Unable to set high bw\n", __func__);
			return ret;
		}
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
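
/*
 * Reference-counted high-bandwidth vote: clocks and bandwidth are enabled
 * on the first request and released when the last requester drops its vote.
 */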
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
				bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0) {
			ret = qcedev_control_clocks(podev, true);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1) {
			ret = qcedev_control_clocks(podev, false);
			if (ret)
				goto exit_unlock_mutex;
		}
		podev->high_bw_req_count--;
	}
exit_unlock_mutex:
	mutex_unlock(&qcedev_sent_bw_req);
}
#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev,
				int *current_req_info);
static int start_offload_cipher_req(struct qcedev_control *podev,
				int *current_req_info);
static int start_sha_req(struct qcedev_control *podev,
				int *current_req_info);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_qcedev_ioctl,
#endif
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev;
static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].minor == n)
			return &qce_dev[i];
	}
	return NULL;
}
static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->cntl = podev;
	file->private_data = handle;

	qcedev_ce_high_bw_req(podev, true);

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}
static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
	}

	qcedev_ce_high_bw_req(podev, false);
	if (qcedev_unmap_all_buffers(handle))
		pr_err("%s: failed to unmap all ion buffers\n", __func__);

	kfree_sensitive(handle);
	file->private_data = NULL;
	return 0;
}
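
/*
 * Tasklet body: retire the active command, complete its waiter (unless it
 * already timed out), and wake the next queued request, if any.
 */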
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;
	if (areq) {
		if (!areq->timed_out)
			complete(&areq->complete);
		areq->state = QCEDEV_REQ_DONE;
	}

	/* Look through queued requests and wake up the corresponding thread */
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		new_req->state = QCEDEV_REQ_CURRENT;
		wake_up_interruptible(&new_req->wait_q);
	}

	spin_unlock_irqrestore(&podev->lock, flags);
}
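
/*
 * QCE completion callback for hash requests: stash the digest and auth
 * counters in the handle's SHA context, then defer the rest to req_done().
 */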
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;
	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;
	if (!pdev)
		return;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
	}

	tasklet_schedule(&pdev->done_tasklet);
}
void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	if (!podev)
		return;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}
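
/*
 * Translate the userspace cipher request held in podev->active_command
 * into a qce_req and hand it to the QCE core. A zero-length key is only
 * accepted for the explicit no-key operations, or for an all-zero key
 * when the platform supports a hardware-backed key.
 */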
static int start_cipher_req(struct qcedev_control *podev,
			int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	memset(&creq, 0, sizeof(creq));

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto unsupported;
	}
	creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
	creq.iv_ctr_size = 0;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;
	creq.flags = 0;
	creq.offload_op = QCE_OFFLOAD_NONE;
	ret = qce_ablk_cipher_req(podev->qce, &creq);
	*current_req_info = creq.current_req_info;
unsupported:
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
			unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	if (!areq || !areq->cookie)
		return;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	if (!podev)
		return;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
			qcedev_areq->offload_cipher_op_req.ivlen);

	tasklet_schedule(&podev->done_tasklet);
}
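
/*
 * Build a qce_req for an offload (pipe-keyed) AES request. Offload use
 * cases rely on pipe keys, so no key material is copied here; the
 * direction is derived from the copy-op flag or the HLOS/CPB operation,
 * and any pattern information is packed into creq.pattern_info.
 */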
static int start_offload_cipher_req(struct qcedev_control *podev,
				int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	u8 patt_sz = 0, proc_data_sz = 0;
	int ret = 0;

	memset(&creq, 0, sizeof(creq));

	/* Start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;

	switch (qcedev_areq->offload_cipher_op_req.alg) {
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->offload_cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	default:
		return -EINVAL;
	}

	if (qcedev_areq->offload_cipher_op_req.is_copy_op) {
		creq.dir = QCE_ENCRYPT;
	} else {
		switch (qcedev_areq->offload_cipher_op_req.op) {
		case QCEDEV_OFFLOAD_HLOS_HLOS:
		case QCEDEV_OFFLOAD_HLOS_CPB:
			creq.dir = QCE_DECRYPT;
			break;
		case QCEDEV_OFFLOAD_CPB_HLOS:
			creq.dir = QCE_ENCRYPT;
			break;
		default:
			return -EINVAL;
		}
	}

	creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
	creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;

	creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;

	/* OFFLOAD use cases use PIPE keys so no need to set keys */
	creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
	creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
	creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
	if (qcedev_areq->offload_cipher_op_req.is_copy_op)
		creq.is_copy_op = true;

	creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;

	creq.qce_cb = qcedev_offload_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
	proc_data_sz =
		qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
	creq.is_pattern_valid =
		qcedev_areq->offload_cipher_op_req.is_pattern_valid;
	if (creq.is_pattern_valid) {
		creq.pattern_info = 0x1;
		if (patt_sz)
			creq.pattern_info |= (patt_sz - 1) << 4;
		if (proc_data_sz)
			creq.pattern_info |= (proc_data_sz - 1) << 8;
		creq.pattern_info |=
			qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
	}
	creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;

	ret = qce_ablk_cipher_req(podev->qce, &creq);

	*current_req_info = creq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
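
/*
 * Build a qce_sha_req from the active command. HMAC algorithms fall back
 * to plain SHA when the CE core lacks native HMAC support (the handle's
 * SHA context then carries the ipad/opad state between calls).
 */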
static int start_sha_req(struct qcedev_control *podev,
			int *current_req_info)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		pr_err("Algorithm %d not supported, exiting\n",
			qcedev_areq->sha_op_req.alg);
		return -EINVAL;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	sreq.flags = 0;

	ret = qce_process_sha_req(podev->qce, &sreq);

	*current_req_info = sreq.current_req_info;
	qcedev_areq->err = ret ? -ENXIO : 0;
	return ret;
}
static void qcedev_check_crypto_status(
			struct qcedev_async_req *qcedev_areq, void *handle)
{
	struct qce_error error = {0};

	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
	qce_get_crypto_status(handle, &error);

	if (error.timer_error) {
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR;
	} else if (error.key_paused) {
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_KEY_PAUSE_ERROR;
	} else if (error.generic_error) {
		qcedev_areq->offload_cipher_op_req.err =
			QCEDEV_OFFLOAD_GENERIC_ERROR;
	}
}

#define MAX_RETRIES 333
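
/*
 * Serialize crypto requests: only one request is in flight per device.
 * Newcomers queue on ready_commands and sleep until req_done() promotes
 * them. After submission, wait for the QCE completion; on timeout, either
 * poll for a late completion (when no offload error is reported) or tear
 * the request down via qce_manage_timeout().
 */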
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;
	int current_req_info = 0;
	int wait = MAX_CRYPTO_WAIT_TIME;
	struct qcedev_async_req *new_req = NULL;
	int retries = 0;

	qcedev_areq->err = 0;
	podev = handle->cntl;
	init_waitqueue_head(&qcedev_areq->wait_q);

	spin_lock_irqsave(&podev->lock, flags);

	/*
	 * Service only one crypto request at a time.
	 * Any other new requests are queued in ready_commands and woken up
	 * only when the active command has finished successfully or when the
	 * request times out or when the command failed when setting up.
	 */
	do {
		if (podev->active_command == NULL) {
			podev->active_command = qcedev_areq;
			qcedev_areq->state = QCEDEV_REQ_SUBMITTED;
			switch (qcedev_areq->op_type) {
			case QCEDEV_CRYPTO_OPER_CIPHER:
				ret = start_cipher_req(podev,
						&current_req_info);
				break;
			case QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER:
				ret = start_offload_cipher_req(podev,
						&current_req_info);
				break;
			default:
				ret = start_sha_req(podev,
						&current_req_info);
				break;
			}
		} else {
			list_add_tail(&qcedev_areq->list,
					&podev->ready_commands);
			qcedev_areq->state = QCEDEV_REQ_WAITING;
			if (wait_event_interruptible_lock_irq_timeout(
				qcedev_areq->wait_q,
				(qcedev_areq->state == QCEDEV_REQ_CURRENT),
				podev->lock,
				msecs_to_jiffies(MAX_REQUEST_TIME)) == 0) {
				pr_err("%s: request timed out\n", __func__);
				spin_unlock_irqrestore(&podev->lock, flags);
				return qcedev_areq->err;
			}
		}
	} while (qcedev_areq->state != QCEDEV_REQ_SUBMITTED);

	if (ret != 0) {
		podev->active_command = NULL;
		/*
		 * Look through queued requests and wake up the corresponding
		 * thread.
		 */
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
			list_del(&new_req->list);
			new_req->state = QCEDEV_REQ_CURRENT;
			wake_up_interruptible(&new_req->wait_q);
		}
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	qcedev_areq->timed_out = false;
	if (ret == 0)
		wait = wait_for_completion_timeout(&qcedev_areq->complete,
				msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME));

	if (!wait) {
		/*
		 * This means wait timed out, and the callback routine was not
		 * exercised. The callback sequence does some housekeeping which
		 * would be missed here, hence having a call to qce here to do
		 * that.
		 */
		pr_err("%s: wait timed out, req info = %d\n", __func__,
			current_req_info);
		qcedev_check_crypto_status(qcedev_areq, podev->qce);
		if (qcedev_areq->offload_cipher_op_req.err ==
						QCEDEV_OFFLOAD_NO_ERROR) {
			pr_err("%s: no error, wait for request to be done\n",
				__func__);
			while (qcedev_areq->state != QCEDEV_REQ_DONE &&
					retries < MAX_RETRIES) {
				usleep_range(3000, 5000);
				retries++;
				pr_err("%s: waiting for req state to be done, retries = %d\n",
					__func__, retries);
			}
			return 0;
		}
		spin_lock_irqsave(&podev->lock, flags);
		qcedev_areq->timed_out = true;
		ret = qce_manage_timeout(podev->qce, current_req_info);
		if (ret)
			pr_err("%s: error during manage timeout\n", __func__);

		spin_unlock_irqrestore(&podev->lock, flags);
		req_done((unsigned long) podev);
		if (qcedev_areq->offload_cipher_op_req.err !=
						QCEDEV_OFFLOAD_NO_ERROR)
			return 0;
	}

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat;
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
		/* Do nothing */
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}
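
/*
 * Reset the per-handle SHA context and seed the digest with the FIPS 180-2
 * initial hash value for the selected algorithm.
 */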
static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	sha_ctxt->init_done = true;
	return 0;
}
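
/*
 * Hash at most QCE_MAX_OPER_DATA bytes: gather the user buffers (plus any
 * trailing bytes left over from the previous update) into one cache-line
 * aligned kernel buffer, hash all complete blocks, and keep the residue in
 * the context's trailing buffer for the next update or the final call.
 */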
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src = qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}

	buf_size = total + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0], t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		memset(k_buf_src, 0, buf_size);
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, buf_size);
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	memset(k_buf_src, 0, buf_size);
	kfree(k_buf_src);
	return err;
}
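
/*
 * Split an update larger than QCE_MAX_OPER_DATA into chunks the CE can
 * take in one transfer. The request's entry list is rewritten in place
 * for each chunk and restored from a saved copy before returning.
 */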
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
		uint32_t req_size = 0;

		req_size = sizeof(struct qcedev_sha_op_req);
		/* save the original req structure */
		saved_req = kmalloc(req_size, GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
					__func__, (uintptr_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(*sreq));
		memcpy(saved_req, sreq, sizeof(*sreq));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle, sg_src);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		memset(saved_req, 0, req_size);
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);

	return err;
}
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;
	uint8_t *k_align_src = NULL;

	if (!handle->sha_ctxt.init_done) {
		pr_err("%s Init was not called\n", __func__);
		return -EINVAL;
	}

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	buf_size = total + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;

	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	handle->sha_ctxt.init_done = false;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
	memset(k_buf_src, 0, buf_size);
	kfree(k_buf_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;
	int i = 0;
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;

	total = qcedev_areq->sha_op_req.data_len;

	if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) &&
		(qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key length\n", __func__);
		return -EINVAL;
	}

	if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;

	if (total > U32_MAX - CACHE_LINE_SIZE * 2)
		return -EINVAL;

	buf_size = total + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			memset(k_buf_src, 0, buf_size);
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = sg_src;
	sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	memset(k_buf_src, 0, buf_size);
	kfree(k_buf_src);
	return err;
}
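
/*
 * Install the HMAC key. Per RFC 2104, a key longer than the block size is
 * first hashed (via a nested SHA request) and the digest is used as the
 * effective key; shorter keys are copied in directly.
 */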
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle,
					struct scatterlist *sg_src)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		if (copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}
static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;

	sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;
	memset(k_src, 0, sha_block_size);
	kfree(k_src);
	qcedev_areq->sha_req.sreq.src = NULL;
	return err;
}
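
/*
 * Prepare the next HMAC block: XOR the stored key with the RFC 2104 ipad
 * (0x36) or opad (0x5c) constant and stage it in the trailing buffer.
 */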
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}
static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle, sg_src);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle,
				struct scatterlist *sg_src)
{
	return qcedev_sha_update(qcedev_areq, handle, sg_src);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}
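
/*
 * One in-place cipher transfer: copy the user source buffers into the
 * aligned kernel bounce buffer, run the request, then scatter the result
 * back into the user destination buffers. *di tracks the destination
 * entry index across successive calls.
 */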
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src = areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_init_one(areq->cipher_req.creq.src,
			k_align_dst,
			areq->cipher_op_req.data_len);

	areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.iv = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->vbuf.dst[dst_i].len;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len)) {
				err = -EFAULT;
				goto exit;
			}

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;
exit:
	areq->cipher_req.creq.src = NULL;
	areq->cipher_req.creq.dst = NULL;
	return err;
}
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint32_t buf_size = 0;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	uint32_t req_size = 0;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	total = 0;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	buf_size = QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2;
	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	req_size = sizeof(struct qcedev_cipher_op_req);
	saved_req = kmemdup(creq, req_size, GFP_KERNEL);
	if (saved_req == NULL) {
		memset(k_buf_src, 0, buf_size);
		kfree(k_buf_src);
		return -ENOMEM;
	}

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0, req_size);
					memset(k_buf_src, 0, buf_size);
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;
			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					memset(saved_req, 0, req_size);
					memset(k_buf_src, 0, buf_size);
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
								k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	memset(saved_req, 0, req_size);
	memset(k_buf_src, 0, buf_size);
	kfree(saved_req);
	kfree(k_buf_src);
	return err;
}
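
/*
 * Offload cipher path: the vbuf vaddr fields here are device-visible
 * addresses (buffers are expected to have been mapped beforehand via
 * QCEDEV_IOCTL_MAP_BUF_REQ), so they are placed directly into the
 * scatterlist dma_address fields instead of being copied through a
 * bounce buffer. Each entry is processed in chunks of at most
 * MAX_CEHW_REQ_TRANSFER_SIZE bytes.
 */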
static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	size_t byteoffset = 0;
	size_t transfer_data_len = 0;
	size_t pending_data_len = 0;
	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
	uint8_t *user_src = NULL;
	uint8_t *user_dst = NULL;
	struct scatterlist sg_src;
	struct scatterlist sg_dst;

	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->offload_cipher_op_req.byteoffset;

	/*
	 * areq has two components:
	 * a) Request that comes from userspace i.e. offload_cipher_op_req
	 * b) Request that QCE understands - skcipher i.e. cipher_req.creq
	 * skcipher has sglist pointers src and dest that would carry
	 * data to/from CE.
	 */
	areq->cipher_req.creq.src = &sg_src;
	areq->cipher_req.creq.dst = &sg_dst;
	sg_init_table(&sg_src, 1);
	sg_init_table(&sg_dst, 1);

	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
		transfer_data_len = 0;
		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
		user_src += byteoffset;

		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
		user_dst += byteoffset;

		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;

		while (pending_data_len) {
			transfer_data_len = min(max_data_xfer,
						pending_data_len);
			sg_src.dma_address = (dma_addr_t)user_src;
			sg_dst.dma_address = (dma_addr_t)user_dst;
			areq->cipher_req.creq.cryptlen = transfer_data_len;

			sg_src.length = transfer_data_len;
			sg_dst.length = transfer_data_len;

			err = submit_req(areq, handle);
			if (err) {
				pr_err("%s: Error processing req, err = %d\n",
						__func__, err);
				goto exit;
			}
			/* update data len to be processed */
			pending_data_len -= transfer_data_len;
			user_src += transfer_data_len;
			user_dst += transfer_data_len;
		}
	}
exit:
	return err;
}
static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	/* if intending to use HW key make sure key fields are set
	 * correctly and HW key is indeed supported in target
	 */
	if (req->encklen == 0) {
		int i;

		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
			if (req->enckey[i]) {
				pr_err("%s: Invalid key: non-zero key input\n",
								__func__);
				goto error;
			}
		}
		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
			(req->op != QCEDEV_OPER_DEC_NO_KEY))
			if (!podev->platform_support.hw_key_support) {
				pr_err("%s: Invalid op %d\n", __func__,
						(uint32_t)req->op);
				goto error;
			}
	} else {
		if (req->encklen == QCEDEV_AES_KEY_192) {
			if (!podev->ce_support.aes_key_192) {
				pr_err("%s: AES-192 not supported\n", __func__);
				goto error;
			}
		} else {
			/* if not using HW key make sure key
			 * length is valid
			 */
			if (req->mode == QCEDEV_AES_MODE_XTS) {
				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
					(req->encklen != QCEDEV_AES_KEY_256*2)) {
					pr_err("%s: unsupported key size: %d\n",
							__func__, req->encklen);
					goto error;
				}
			} else {
				if ((req->encklen != QCEDEV_AES_KEY_128) &&
					(req->encklen != QCEDEV_AES_KEY_256)) {
					pr_err("%s: unsupported key size %d\n",
							__func__, req->encklen);
					goto error;
				}
			}
		}
	}
	return 0;
error:
	return -EINVAL;
}
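
/*
 * Validate a userspace cipher request before any processing: entry
 * count, algorithm/mode, byteoffset (CTR-only, sub-block), IV and key
 * lengths, and that the src and dst buffer lengths each sum exactly to
 * data_len without 32-bit overflow.
 */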
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if (req->use_pmem) {
		pr_err("%s: Use of PMEM is not supported\n", __func__);
		goto error;
	}
	if ((req->entries == 0) || (req->data_len == 0) ||
		(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
						(uint32_t)req->alg);
		goto error;
	}
	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
				(!podev->ce_support.aes_xts)) {
		pr_err("%s: XTS algorithm is not supported\n", __func__);
		goto error;
	}
	if (req->alg == QCEDEV_ALG_AES) {
		if (qcedev_check_cipher_key(req, podev))
			goto error;
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
								__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Integer overflow on total src len\n",
								__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
		goto error;
	}

	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen > 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
				(req->mode == QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a zero length IV\n", __func__);
			goto error;
		}
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
				(req->mode != QCEDEV_DES_MODE_ECB)) {
			pr_err("%s: Expecting a non-zero length IV\n", __func__);
			goto error;
		}
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Integer overflow on total req dst vbuf length\n",
								__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
			__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
				__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req src vbuf length\n",
								__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}
	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
				__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
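
/*
 * Validate a userspace hash/HMAC/CMAC request: algorithm support,
 * entry count, auth key presence and length, and that the data buffer
 * lengths sum exactly to data_len without overflow.
 */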
static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	uint32_t total = 0;
	uint32_t i;

	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac)) {
		pr_err("%s: CMAC not supported\n", __func__);
		goto sha_error;
	}
	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid num entries (%d)\n",
						__func__, req->entries);
		goto sha_error;
	}
	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
		goto sha_error;
	}
	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
		if (req->authkey == NULL) {
			pr_err("%s: Invalid authkey pointer\n", __func__);
			goto sha_error;
		}
		if (req->authklen <= 0) {
			pr_err("%s: Invalid authkey length (%d)\n",
						__func__, req->authklen);
			goto sha_error;
		}
	}

	if (req->alg == QCEDEV_ALG_AES_CMAC) {
		if ((req->authklen != QCEDEV_AES_KEY_128) &&
					(req->authklen != QCEDEV_AES_KEY_256)) {
			pr_err("%s: unsupported key length\n", __func__);
			goto sha_error;
		}
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (req->data[i].len > U32_MAX - total) {
			pr_err("%s: Integer overflow on total req buf length\n",
								__func__);
			goto sha_error;
		}
		total += req->data[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
			__func__, total, req->data_len);
		goto sha_error;
	}
	return 0;
sha_error:
	return -EINVAL;
}
static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
					struct qcedev_control *podev)
{
	if (req->encklen == 0)
		return -EINVAL;

	/* AES-192 is not a valid option for OFFLOAD use case */
	if ((req->encklen != QCEDEV_AES_KEY_128) &&
			(req->encklen != QCEDEV_AES_KEY_256)) {
		pr_err("%s: unsupported key size %d\n",
					__func__, req->encklen);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
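
/*
 * Same shape of validation as qcedev_check_cipher_params(), restricted
 * to the offload case: AES only with modes up to QCEDEV_AES_MODE_CTR,
 * no AES-192 keys, plus an additional block_offset bound check.
 */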
static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
					struct qcedev_control *podev)
{
	uint32_t total = 0;
	int i = 0;

	if ((req->entries == 0) || (req->data_len == 0) ||
		(req->entries > QCEDEV_MAX_BUFFERS)) {
		pr_err("%s: Invalid cipher length/entries\n", __func__);
		goto error;
	}

	if ((req->alg != QCEDEV_ALG_AES) ||
		(req->mode > QCEDEV_AES_MODE_CTR)) {
		pr_err("%s: Invalid algorithm %d\n", __func__,
					(uint32_t)req->alg);
		goto error;
	}

	if (qcedev_check_offload_cipher_key(req, podev))
		goto error;

	if (req->block_offset >= AES_CE_BLOCK_SIZE)
		goto error;

	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR) {
			pr_err("%s: Operation on byte offset not supported\n",
							__func__);
			goto error;
		}
		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
			pr_err("%s: Invalid byte offset\n", __func__);
			goto error;
		}
		total = req->byteoffset;
		for (i = 0; i < req->entries; i++) {
			if (total > U32_MAX - req->vbuf.src[i].len) {
				pr_err("%s:Int overflow on total src len\n",
							__func__);
				goto error;
			}
			total += req->vbuf.src[i].len;
		}
	}

	if (req->data_len < req->byteoffset) {
		pr_err("%s: req data length %u is less than byteoffset %u\n",
				__func__, req->data_len, req->byteoffset);
		goto error;
	}

	/* Ensure IV size */
	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
		goto error;
	}

	/* Ensure Key size */
	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
		pr_err("%s: Klen is not correct: %u\n", __func__,
					req->encklen);
		goto error;
	}

	/* Check for sum of all dst length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
					__func__, i, req->vbuf.dst[i].len);
			goto error;
		}
		if (req->vbuf.dst[i].len >= U32_MAX - total) {
			pr_err("%s: Int overflow on total req dst vbuf len\n",
							__func__);
			goto error;
		}
		total += req->vbuf.dst[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
				__func__, i, total, req->data_len);
		goto error;
	}

	/* Check for sum of all src length is equal to data_len */
	for (i = 0, total = 0; i < req->entries; i++) {
		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
					__func__, i, req->vbuf.src[i].len);
			goto error;
		}
		if (req->vbuf.src[i].len > U32_MAX - total) {
			pr_err("%s: Int overflow on total req src vbuf len\n",
							__func__);
			goto error;
		}
		total += req->vbuf.src[i].len;
	}

	if (total != req->data_len) {
		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
				__func__, total, req->data_len);
		goto error;
	}
	return 0;
error:
	return -EINVAL;
}
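
/*
 * Main ioctl entry point. Each request structure is copied in from
 * userspace, validated with the matching _check_ helper, executed, and
 * the (possibly updated) structure copied back. Hash state lives in the
 * per-handle sha_ctxt across INIT/UPDATE/FINAL, so all hash cases
 * serialize on hash_access_lock.
 *
 * Minimal userspace sketch for the cipher path (field names as used by
 * the checks above; the device node name, key/IV setup, and error
 * handling are assumptions and omitted):
 *
 *	struct qcedev_cipher_op_req req = {0};
 *	int fd = open("/dev/qce", O_RDWR);
 *
 *	req.alg = QCEDEV_ALG_AES;
 *	req.mode = QCEDEV_AES_MODE_CTR;
 *	req.entries = 1;
 *	req.vbuf.src[0].vaddr = src;
 *	req.vbuf.src[0].len = len;
 *	req.vbuf.dst[0].vaddr = dst;
 *	req.vbuf.dst[0].len = len;
 *	req.data_len = len;
 *	... set enckey/encklen and iv/ivlen ...
 *	ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
 */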
long qcedev_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;
	struct qcedev_stat *pstat;

	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
	if (!qcedev_areq)
		return -ENOMEM;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq->handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
		err = -ENOENT;
		goto exit_free_qcedev_areq;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

	init_completion(&qcedev_areq->complete);
	pstat = &_qcedev_stat;

	switch (cmd) {
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (copy_from_user(&qcedev_areq->cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
				podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;
		if (copy_to_user((void __user *)arg,
				&qcedev_areq->cipher_op_req,
				sizeof(struct qcedev_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
		if (qcedev_check_offload_cipher_params(
				&qcedev_areq->offload_cipher_op_req, podev)) {
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;

		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
		if (err)
			goto exit_free_qcedev_areq;

		if (copy_to_user((void __user *)arg,
				&qcedev_areq->offload_cipher_op_req,
				sizeof(struct qcedev_offload_cipher_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = true;
		}
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac) {
			err = -ENOTTY;
			goto exit_free_qcedev_areq;
		}
		fallthrough;
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		} else {
			if (!handle->sha_ctxt.init_done) {
				pr_err("%s Init was not called\n", __func__);
				mutex_unlock(&hash_access_lock);
				err = -EINVAL;
				goto exit_free_qcedev_areq;
			}
			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
			if (err) {
				mutex_unlock(&hash_access_lock);
				goto exit_free_qcedev_areq;
			}
		}

		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:
		if (!handle->sha_ctxt.init_done) {
			pr_err("%s Init was not called\n", __func__);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		handle->sha_ctxt.init_done = false;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:
		{
		struct scatterlist sg_src;

		if (copy_from_user(&qcedev_areq->sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		mutex_lock(&hash_access_lock);
		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(qcedev_areq, handle, &sg_src);
		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		err = qcedev_hash_final(qcedev_areq, handle);
		if (err) {
			mutex_unlock(&hash_access_lock);
			goto exit_free_qcedev_areq;
		}
		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
			pr_err("Invalid sha_ctxt.diglen %d\n",
					handle->sha_ctxt.diglen);
			mutex_unlock(&hash_access_lock);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}
		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq->sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		mutex_unlock(&hash_access_lock);
		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
					sizeof(struct qcedev_sha_op_req))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		}
		break;
	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
		unsigned long long vaddr = 0;
		struct qcedev_map_buf_req map_buf = { {0} };
		int i = 0;

		if (copy_from_user(&map_buf,
				(void __user *)arg, sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}

		if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) {
			pr_err("%s: err: num_fds = %d exceeds max value\n",
						__func__, map_buf.num_fds);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < map_buf.num_fds; i++) {
			err = qcedev_check_and_map_buffer(handle,
					map_buf.fd[i],
					map_buf.fd_offset[i],
					map_buf.fd_size[i],
					&vaddr);
			if (err) {
				pr_err(
					"%s: err: failed to map fd(%d) - %d\n",
					__func__, map_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
			map_buf.buf_vaddr[i] = vaddr;
			pr_info("%s: info: vaddr = %llx, fd = %d\n",
				__func__, vaddr, map_buf.fd[i]);
		}

		if (copy_to_user((void __user *)arg, &map_buf,
				sizeof(map_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		break;
		}
	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
		struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
		int i = 0;

		if (copy_from_user(&unmap_buf,
				(void __user *)arg, sizeof(unmap_buf))) {
			err = -EFAULT;
			goto exit_free_qcedev_areq;
		}
		if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) {
			pr_err("%s: err: num_fds = %d exceeds max value\n",
						__func__, unmap_buf.num_fds);
			err = -EINVAL;
			goto exit_free_qcedev_areq;
		}

		for (i = 0; i < unmap_buf.num_fds; i++) {
			err = qcedev_check_and_unmap_buffer(handle,
					unmap_buf.fd[i]);
			if (err) {
				pr_err(
					"%s: err: failed to unmap fd(%d) - %d\n",
					__func__,
					unmap_buf.fd[i], err);
				goto exit_free_qcedev_areq;
			}
		}
		break;
		}

	default:
		err = -ENOTTY;
		goto exit_free_qcedev_areq;
	}

exit_free_qcedev_areq:
	kfree(qcedev_areq);
	return err;
}
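
/*
 * Device probe: register the character device, set up the interconnect
 * (ICC) bandwidth votes from DT properties (falling back to nominal
 * defaults), open the QCE hardware, record its capabilities, and
 * populate any child context-bank devices.
 */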
static int qcedev_probe_device(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	podev = &qce_dev[0];

	rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QCEDEV_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL,
			QCEDEV_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&podev->cdev, &qcedev_fops);
	podev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	podev->minor = 0;

	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	INIT_LIST_HEAD(&podev->context_banks);

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	podev->icc_path = of_icc_get(&pdev->dev, "data_path");
	if (IS_ERR(podev->icc_path)) {
		rc = PTR_ERR(podev->icc_path);
		pr_err("%s Failed to get icc path with error %d\n",
			__func__, rc);
		goto exit_del_cdev;
	}

	/*
	 * HLOS crypto vote values from DTSI. If no values specified, use
	 * nominal values.
	 */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,icc_avg_bw",
				&podev->icc_avg_bw)) {
		pr_warn("%s: No icc avg BW set, using default\n", __func__);
		podev->icc_avg_bw = CRYPTO_AVG_BW;
	}

	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,icc_peak_bw",
				&podev->icc_peak_bw)) {
		pr_warn("%s: No icc peak BW set, using default\n", __func__);
		podev->icc_peak_bw = CRYPTO_PEAK_BW;
	}

	rc = icc_set_bw(podev->icc_path, podev->icc_avg_bw,
				podev->icc_peak_bw);
	if (rc) {
		pr_err("%s Unable to set high bandwidth\n", __func__);
		goto exit_unregister_bus_scale;
	}

	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		rc = -ENODEV;
		goto exit_scale_busbandwidth;
	}

	rc = icc_set_bw(podev->icc_path, 0, 0);
	if (rc) {
		pr_err("%s Unable to set to low bandwidth\n", __func__);
		goto exit_qce_close;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);

	qce_hw_support(podev->qce, &podev->ce_support);
	if (podev->ce_support.bam) {
		podev->platform_support.ce_shared = 0;
		podev->platform_support.shared_ce_resource = 0;
		podev->platform_support.hw_key_support =
						podev->ce_support.hw_key;
		podev->platform_support.sha_hmac = 1;
	} else {
		platform_support =
			(struct msm_ce_hw_support *)pdev->dev.platform_data;
		podev->platform_support.ce_shared = platform_support->ce_shared;
		podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
		podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto exit_mem_new_client;
	}

	return 0;

exit_mem_new_client:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;

exit_qce_close:
	if (handle)
		qce_close(handle);
exit_scale_busbandwidth:
	icc_set_bw(podev->icc_path, 0, 0);
exit_unregister_bus_scale:
	if (podev->icc_path)
		icc_put(podev->icc_path);
exit_del_cdev:
	cdev_del(&podev->cdev);
exit_destroy_device:
	device_destroy(driver_class, qcedev_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qcedev_device_no, 1);

	podev->icc_path = NULL;
	platform_set_drvdata(pdev, NULL);
	podev->pdev = NULL;
	podev->qce = NULL;
	return rc;
}
static int qcedev_probe(struct platform_device *pdev)
{
	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
		return qcedev_probe_device(pdev);
	else if (of_device_is_compatible(pdev->dev.of_node,
		"qcom,qcedev,context-bank"))
		return qcedev_parse_context_bank(pdev);

	return -EINVAL;
}
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;

	if (podev->qce)
		qce_close(podev->qce);

	if (podev->icc_path)
		icc_put(podev->icc_path);
	tasklet_kill(&podev->done_tasklet);

	cdev_del(&podev->cdev);

	device_destroy(driver_class, qcedev_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qcedev_device_no, 1);
	return 0;
}
static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, false);
		if (ret)
			goto suspend_exit;
	}

suspend_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}
static int qcedev_resume(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	int ret;

	podev = platform_get_drvdata(pdev);

	if (!podev)
		return 0;

	mutex_lock(&qcedev_sent_bw_req);
	if (podev->high_bw_req_count) {
		ret = qcedev_control_clocks(podev, true);
		if (ret)
			goto resume_exit;
	}

resume_exit:
	mutex_unlock(&qcedev_sent_bw_req);
	return 0;
}
static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.suspend = qcedev_suspend,
	.resume = qcedev_resume,
	.driver = {
		.name = "qce",
		.of_match_table = qcedev_match,
	},
};
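
/*
 * debugfs support: a "stats-1" file under the "qcedev" debugfs
 * directory renders the per-driver encryption/decryption success and
 * failure counters; writing anything to it clears them.
 */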
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI QCE dev driver %d Statistics:\n",
				id + 1);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation success : %d\n",
					pstat->qcedev_enc_success);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation fail    : %d\n",
					pstat->qcedev_enc_fail);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation success : %d\n",
					pstat->qcedev_dec_success);

	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation fail    : %d\n",
					pstat->qcedev_dec_fail);

	return len;
}
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	ssize_t rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	if (len <= count)
		rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
	return rc;
}
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	snprintf(name, DEBUG_MAX_FNAME - 1, "stats-%d", 1);
	_debug_qcedev = 0;
	dent = debugfs_create_file(name, 0644, _debug_dent,
				&_debug_qcedev, &_debug_stats_ops);
	if (dent == NULL) {
		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
				PTR_ERR(dent));
		rc = PTR_ERR(dent);
		goto err;
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}
static int qcedev_init(void)
{
	_qcedev_debug_init();
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI DEV Crypto driver");
MODULE_IMPORT_NS(DMA_BUF);

module_init(qcedev_init);
module_exit(qcedev_exit);