/* crypto/af_alg.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * af_alg: User-space algorithm interface
  4. *
  5. * This file provides the user-space API for algorithms.
  6. *
  7. * Copyright (c) 2010 Herbert Xu <[email protected]>
  8. */
  9. #include <linux/atomic.h>
  10. #include <crypto/if_alg.h>
  11. #include <linux/crypto.h>
  12. #include <linux/init.h>
  13. #include <linux/kernel.h>
  14. #include <linux/list.h>
  15. #include <linux/module.h>
  16. #include <linux/net.h>
  17. #include <linux/rwsem.h>
  18. #include <linux/sched.h>
  19. #include <linux/sched/signal.h>
  20. #include <linux/security.h>
/*
 * One entry in the global registry of algorithm socket types
 * (e.g. hash, skcipher, aead, rng).  The registry itself is the
 * alg_types list, serialized by alg_types_sem.
 */
struct alg_type_list {
	const struct af_alg_type *type;	/* callbacks of the registered type */
	struct list_head list;		/* linkage into alg_types */
};
/* Protocol descriptor for PF_ALG sockets; each socket embeds a struct alg_sock. */
static struct proto alg_proto = {
	.name = "ALG",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct alg_sock),
};
/* Registry of algorithm types and the rw-semaphore that protects it. */
static LIST_HEAD(alg_types);
static DECLARE_RWSEM(alg_types_sem);
  32. static const struct af_alg_type *alg_get_type(const char *name)
  33. {
  34. const struct af_alg_type *type = ERR_PTR(-ENOENT);
  35. struct alg_type_list *node;
  36. down_read(&alg_types_sem);
  37. list_for_each_entry(node, &alg_types, list) {
  38. if (strcmp(node->type->name, name))
  39. continue;
  40. if (try_module_get(node->type->owner))
  41. type = node->type;
  42. break;
  43. }
  44. up_read(&alg_types_sem);
  45. return type;
  46. }
  47. int af_alg_register_type(const struct af_alg_type *type)
  48. {
  49. struct alg_type_list *node;
  50. int err = -EEXIST;
  51. down_write(&alg_types_sem);
  52. list_for_each_entry(node, &alg_types, list) {
  53. if (!strcmp(node->type->name, type->name))
  54. goto unlock;
  55. }
  56. node = kmalloc(sizeof(*node), GFP_KERNEL);
  57. err = -ENOMEM;
  58. if (!node)
  59. goto unlock;
  60. type->ops->owner = THIS_MODULE;
  61. if (type->ops_nokey)
  62. type->ops_nokey->owner = THIS_MODULE;
  63. node->type = type;
  64. list_add(&node->list, &alg_types);
  65. err = 0;
  66. unlock:
  67. up_write(&alg_types_sem);
  68. return err;
  69. }
  70. EXPORT_SYMBOL_GPL(af_alg_register_type);
  71. int af_alg_unregister_type(const struct af_alg_type *type)
  72. {
  73. struct alg_type_list *node;
  74. int err = -ENOENT;
  75. down_write(&alg_types_sem);
  76. list_for_each_entry(node, &alg_types, list) {
  77. if (strcmp(node->type->name, type->name))
  78. continue;
  79. list_del(&node->list);
  80. kfree(node);
  81. err = 0;
  82. break;
  83. }
  84. up_write(&alg_types_sem);
  85. return err;
  86. }
  87. EXPORT_SYMBOL_GPL(af_alg_unregister_type);
  88. static void alg_do_release(const struct af_alg_type *type, void *private)
  89. {
  90. if (!type)
  91. return;
  92. type->release(private);
  93. module_put(type->owner);
  94. }
  95. int af_alg_release(struct socket *sock)
  96. {
  97. if (sock->sk) {
  98. sock_put(sock->sk);
  99. sock->sk = NULL;
  100. }
  101. return 0;
  102. }
  103. EXPORT_SYMBOL_GPL(af_alg_release);
/*
 * Drop one reference on the parent (bound) socket of child @sk.
 *
 * Called when an accept()ed child socket goes away.  If the child was
 * created in the "nokey" state (no key set before accept), the parent's
 * nokey_refcnt is decremented as well.  Dropping the last refcnt
 * releases the sock_hold() taken by the first child in af_alg_accept().
 */
void af_alg_release_parent(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	/* Sample the child's nokey state before switching to the parent. */
	unsigned int nokey = atomic_read(&ask->nokey_refcnt);

	sk = ask->parent;
	ask = alg_sk(sk);

	if (nokey)
		atomic_dec(&ask->nokey_refcnt);

	if (atomic_dec_and_test(&ask->refcnt))
		sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
/*
 * bind() handler: resolve the algorithm named in the sockaddr and
 * instantiate it via the type's ->bind() callback.
 *
 * Only CRYPTO_ALG_KERN_DRIVER_ONLY may appear in salg_feat/salg_mask.
 * Rebinding is refused once the socket is connected or child sockets
 * exist (refcnt != 0).  On success the previous type/private pair, if
 * any, is released outside the socket lock via alg_do_release().
 */
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sockaddr_alg_new *sa = (void *)uaddr;
	const struct af_alg_type *type;
	void *private;
	int err;

	if (sock->state == SS_CONNECTED)
		return -EINVAL;

	/* salg_name must sit at the same offset in old and new layouts. */
	BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
		     offsetof(struct sockaddr_alg, salg_name));
	BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));

	/* At least one byte of algorithm name must follow the header. */
	if (addr_len < sizeof(*sa) + 1)
		return -EINVAL;

	/* If caller uses non-allowed flag, return error. */
	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
		return -EINVAL;

	/* Force NUL termination of both user-controlled strings. */
	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
	sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;

	type = alg_get_type(sa->salg_type);
	if (PTR_ERR(type) == -ENOENT) {
		/* Try to autoload the "algif-<type>" module, then retry. */
		request_module("algif-%s", sa->salg_type);
		type = alg_get_type(sa->salg_type);
	}

	if (IS_ERR(type))
		return PTR_ERR(type);

	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
	if (IS_ERR(private)) {
		module_put(type->owner);
		return PTR_ERR(private);
	}

	err = -EBUSY;
	lock_sock(sk);
	if (atomic_read(&ask->refcnt))
		goto unlock;

	/* Install the new binding; the old pair lands in type/private. */
	swap(ask->type, type);
	swap(ask->private, private);

	err = 0;

unlock:
	release_sock(sk);

	/* Releases either the old binding or, on -EBUSY, the new one. */
	alg_do_release(type, private);

	return err;
}
  161. static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
  162. {
  163. struct alg_sock *ask = alg_sk(sk);
  164. const struct af_alg_type *type = ask->type;
  165. u8 *key;
  166. int err;
  167. key = sock_kmalloc(sk, keylen, GFP_KERNEL);
  168. if (!key)
  169. return -ENOMEM;
  170. err = -EFAULT;
  171. if (copy_from_sockptr(key, ukey, keylen))
  172. goto out;
  173. err = type->setkey(ask->private, key, keylen);
  174. out:
  175. sock_kzfree_s(sk, key, keylen);
  176. return err;
  177. }
/*
 * setsockopt() handler for SOL_ALG options (key, AEAD authsize, DRBG
 * entropy).  Options are only accepted while every existing child is a
 * "nokey" child (refcnt == nokey_refcnt) and, per option, while the
 * socket is not yet connected.
 */
static int alg_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	int err = -EBUSY;

	lock_sock(sk);
	/* Refuse if a fully operational (keyed) child already exists. */
	if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
		goto unlock;

	type = ask->type;

	err = -ENOPROTOOPT;
	if (level != SOL_ALG || !type)
		goto unlock;

	switch (optname) {
	case ALG_SET_KEY:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setkey)
			goto unlock;

		err = alg_setkey(sk, optval, optlen);
		break;
	case ALG_SET_AEAD_AUTHSIZE:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setauthsize)
			goto unlock;

		/* optlen itself carries the authsize value here. */
		err = type->setauthsize(ask->private, optlen);
		break;
	case ALG_SET_DRBG_ENTROPY:
		if (sock->state == SS_CONNECTED)
			goto unlock;
		if (!type->setentropy)
			goto unlock;

		err = type->setentropy(ask->private, optval, optlen);
	}

unlock:
	release_sock(sk);

	return err;
}
/*
 * Create the operational child socket for an accept() on a bound
 * AF_ALG socket.
 *
 * The child inherits the parent's type and private transform handle.
 * If no key has been set yet, type->accept() fails with -ENOKEY and,
 * when supported, the restricted nokey path (type->ops_nokey) is used
 * instead.  The first child takes an extra hold on the parent socket,
 * dropped again in af_alg_release_parent().
 */
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
	struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_type *type;
	struct sock *sk2;
	unsigned int nokey;
	int err;

	lock_sock(sk);
	type = ask->type;

	err = -EINVAL;
	if (!type)
		goto unlock;

	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
	err = -ENOMEM;
	if (!sk2)
		goto unlock;

	sock_init_data(newsock, sk2);
	security_sock_graft(sk2, newsock);
	security_sk_clone(sk, sk2);

	/*
	 * newsock->ops assigned here to allow type->accept call to override
	 * them when required.
	 */
	newsock->ops = type->ops;
	err = type->accept(ask->private, sk2);

	nokey = err == -ENOKEY;
	if (nokey && type->accept_nokey)
		err = type->accept_nokey(ask->private, sk2);

	if (err)
		goto unlock;

	/* First child pins the parent socket. */
	if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
		sock_hold(sk);
	if (nokey) {
		atomic_inc(&ask->nokey_refcnt);
		atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
	}
	alg_sk(sk2)->parent = sk;
	alg_sk(sk2)->type = type;

	newsock->state = SS_CONNECTED;

	if (nokey)
		newsock->ops = type->ops_nokey;

	err = 0;

unlock:
	release_sock(sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_accept);
  265. static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
  266. bool kern)
  267. {
  268. return af_alg_accept(sock->sk, newsock, kern);
  269. }
  270. static const struct proto_ops alg_proto_ops = {
  271. .family = PF_ALG,
  272. .owner = THIS_MODULE,
  273. .connect = sock_no_connect,
  274. .socketpair = sock_no_socketpair,
  275. .getname = sock_no_getname,
  276. .ioctl = sock_no_ioctl,
  277. .listen = sock_no_listen,
  278. .shutdown = sock_no_shutdown,
  279. .mmap = sock_no_mmap,
  280. .sendpage = sock_no_sendpage,
  281. .sendmsg = sock_no_sendmsg,
  282. .recvmsg = sock_no_recvmsg,
  283. .bind = alg_bind,
  284. .release = af_alg_release,
  285. .setsockopt = alg_setsockopt,
  286. .accept = alg_accept,
  287. };
/*
 * sk_destruct callback for the parent socket: release the bound
 * type/private pair (if any) and the module reference behind it.
 */
static void alg_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);

	alg_do_release(ask->type, ask->private);
}
  293. static int alg_create(struct net *net, struct socket *sock, int protocol,
  294. int kern)
  295. {
  296. struct sock *sk;
  297. int err;
  298. if (sock->type != SOCK_SEQPACKET)
  299. return -ESOCKTNOSUPPORT;
  300. if (protocol != 0)
  301. return -EPROTONOSUPPORT;
  302. err = -ENOMEM;
  303. sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
  304. if (!sk)
  305. goto out;
  306. sock->ops = &alg_proto_ops;
  307. sock_init_data(sock, sk);
  308. sk->sk_destruct = alg_sock_destruct;
  309. return 0;
  310. out:
  311. return err;
  312. }
/* PF_ALG address-family descriptor registered with the socket layer. */
static const struct net_proto_family alg_family = {
	.family = PF_ALG,
	.create = alg_create,
	.owner = THIS_MODULE,
};
/*
 * Pin up to @len bytes of user pages from @iter and build an SG table
 * over them in @sgl.
 *
 * At most ALG_MAX_PAGES pages are pinned.  One extra SG slot is
 * initialized past the data so the table can later be chained with
 * af_alg_link_sg().  Returns the number of bytes mapped or a negative
 * error; the pinned pages must be released with af_alg_free_sg().
 */
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
	size_t off;
	ssize_t n;
	int npages, i;

	n = iov_iter_get_pages2(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
	if (n < 0)
		return n;

	npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
	if (WARN_ON(npages == 0))
		return -EINVAL;

	/* Add one extra for linking */
	sg_init_table(sgl->sg, npages + 1);

	for (i = 0, len = n; i < npages; i++) {
		/* Only the first page starts at @off; the rest start at 0. */
		int plen = min_t(int, len, PAGE_SIZE - off);

		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);

		off = 0;
		len -= plen;
	}
	sg_mark_end(sgl->sg + npages - 1);
	sgl->npages = npages;

	return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);
/*
 * Chain @sgl_new onto @sgl_prev using the spare SG slot reserved by
 * af_alg_make_sg(), clearing the old end marker first.
 */
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
			   struct af_alg_sgl *sgl_new)
{
	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
}
  348. void af_alg_free_sg(struct af_alg_sgl *sgl)
  349. {
  350. int i;
  351. for (i = 0; i < sgl->npages; i++)
  352. put_page(sgl->pages[i]);
  353. }
  354. EXPORT_SYMBOL_GPL(af_alg_free_sg);
/*
 * Parse the SOL_ALG control messages of a sendmsg() call into @con:
 * IV (ALG_SET_IV), operation (ALG_SET_OP) and AEAD associated-data
 * length (ALG_SET_AEAD_ASSOCLEN).  Any malformed or unknown SOL_ALG
 * cmsg yields -EINVAL; cmsgs of other levels are ignored.
 */
static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_ALG)
			continue;
		switch (cmsg->cmsg_type) {
		case ALG_SET_IV:
			/* Header first, then re-check with the IV length. */
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
				return -EINVAL;
			con->iv = (void *)CMSG_DATA(cmsg);
			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
						      sizeof(*con->iv)))
				return -EINVAL;
			break;

		case ALG_SET_OP:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->op = *(u32 *)CMSG_DATA(cmsg);
			break;

		case ALG_SET_AEAD_ASSOCLEN:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
				return -EINVAL;
			con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * af_alg_alloc_tsgl - allocate the TX SGL
 *
 * Appends a fresh af_alg_tsgl to ctx->tsgl_list when the list is empty
 * or the tail SGL is full (MAX_SGL_ENTS entries used).  The new table
 * gets one spare slot so it can be sg_chain()ed from its predecessor.
 *
 * @sk: socket of connection to user space
 * Return: 0 upon success, < 0 upon error
 */
static int af_alg_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg = NULL;

	/* Peek at the current tail; only valid if the list is non-empty. */
	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk,
				   struct_size(sgl, sg, (MAX_SGL_ENTS + 1)),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* Chain the full predecessor into the new table. */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
/**
 * af_alg_count_tsgl - Count number of TX SG entries
 *
 * The counting starts from the beginning of the SGL to @bytes. If
 * an @offset is provided, the counting of the SG entries starts at the @offset.
 *
 * @sk: socket of connection to user space
 * @bytes: Count the number of SG entries holding given number of bytes.
 * @offset: Start the counting of SG entries from the given offset.
 * Return: Number of TX SG entries found given the constraints
 */
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
	const struct alg_sock *ask = alg_sk(sk);
	const struct af_alg_ctx *ctx = ask->private;
	const struct af_alg_tsgl *sgl;
	unsigned int i;
	unsigned int sgl_count = 0;

	if (!bytes)
		return 0;

	list_for_each_entry(sgl, &ctx->tsgl_list, list) {
		const struct scatterlist *sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t bytes_count;

			/* Skip offset */
			/*
			 * NOTE(review): @bytes is reduced while skipping too;
			 * callers presumably pass bytes counted from the list
			 * head, not from @offset — confirm against callers.
			 */
			if (offset >= sg[i].length) {
				offset -= sg[i].length;
				bytes -= sg[i].length;
				continue;
			}

			bytes_count = sg[i].length - offset;

			offset = 0;
			sgl_count++;

			/* If we have seen requested number of bytes, stop */
			if (bytes_count >= bytes)
				return sgl_count;

			bytes -= bytes_count;
		}
	}

	return sgl_count;
}
EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
/**
 * af_alg_pull_tsgl - Release the specified buffers from TX SGL
 *
 * If @dst is non-null, reassign the pages to @dst. The caller must release
 * the pages. If @dst_offset is given only reassign the pages to @dst starting
 * at the @dst_offset (byte). The caller must ensure that @dst is large
 * enough (e.g. by using af_alg_count_tsgl with the same offset).
 *
 * @sk: socket of connection to user space
 * @used: Number of bytes to pull from TX SGL
 * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
 * caller must release the buffers in dst.
 * @dst_offset: Reassign the TX SGL from given offset. All buffers before
 * reaching the offset is released.
 */
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
		      size_t dst_offset)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i, j = 0;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			/* Consume at most @used bytes from this entry. */
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created af_alg_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst) {
				if (dst_offset >= plen) {
					/* discard page before offset */
					dst_offset -= plen;
				} else {
					/* reassign page to dst after offset */
					get_page(page);
					sg_set_page(dst + j, page,
						    plen - dst_offset,
						    sg[i].offset + dst_offset);
					dst_offset = 0;
					j++;
				}
			}

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/*
			 * Entry only partially consumed: we are done, and the
			 * remainder stays queued (skips the trailing
			 * merge/init bookkeeping below, as upstream does).
			 */
			if (sg[i].length)
				return;

			put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
	ctx->init = ctx->more;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
/**
 * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
 *
 * Frees every RX SGL on areq->rsgl_list (unpinning its user pages and
 * adjusting ctx->rcvused) and releases the pages referenced by the
 * request's private TX SGL copy, if one was allocated.
 *
 * @areq: Request holding the TX and RX SGL
 */
static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		/* first_rsgl is embedded in areq and must not be kfree'd. */
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	if (tsgl) {
		for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
			if (!sg_page(sg))
				continue;
			put_page(sg_page(sg));
		}

		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
	}
}
/**
 * af_alg_wait_for_wmem - wait for availability of writable memory
 *
 * Sleeps interruptibly until af_alg_writable() reports space, or a
 * signal arrives (-ERESTARTSYS).  With MSG_DONTWAIT it returns -EAGAIN
 * immediately instead of sleeping.
 *
 * @sk: socket of connection to user space
 * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
 * Return: 0 when writable memory is available, < 0 upon error
 */
static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	/*
	 * NOTE(review): SOCKWQ_ASYNC_NOSPACE is set here but not cleared
	 * on exit, unlike the WAITDATA bit in af_alg_wait_for_data —
	 * matches upstream; confirm intentional.
	 */
	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
/**
 * af_alg_wmem_wakeup - wakeup caller when writable memory is available
 *
 * No-op unless the socket actually became writable.  Wakes sleepers on
 * the socket wait queue and signals async waiters.
 *
 * @sk: socket of connection to user space
 */
void af_alg_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!af_alg_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		/*
		 * NOTE(review): wakes with EPOLLIN/read events although this
		 * is a write-space wakeup — matches upstream; confirm before
		 * changing.
		 */
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
/**
 * af_alg_wait_for_data - wait for availability of TX data
 *
 * Sleeps until the connection was initialized (ctx->init) and either
 * the sender finished (!ctx->more) or, when partial requests are
 * allowed, at least @min bytes are queued.  Interruptible by signals
 * (-ERESTARTSYS); MSG_DONTWAIT yields -EAGAIN without sleeping.
 *
 * @sk: socket of connection to user space
 * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
 * @min: Set to minimum request size if partial requests are allowed.
 * Return: 0 when writable memory is available, < 0 upon error
 */
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout,
				  ctx->init && (!ctx->more ||
						(min && ctx->used >= min)),
				  &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
/**
 * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
 *
 * No-op while nothing is queued (ctx->used == 0).  Otherwise wakes
 * readers/pollers on the socket wait queue and signals async waiters.
 *
 * @sk: socket of connection to user space
 */
static void af_alg_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
							   EPOLLRDNORM |
							   EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * af_alg_sendmsg - implementation of sendmsg system call handler
 *
 * The sendmsg system call handler obtains the user data and stores it
 * in ctx->tsgl_list. This implies allocation of the required numbers of
 * struct af_alg_tsgl.
 *
 * In addition, the ctx is filled with the information sent via CMSG.
 *
 * @sock: socket of connection to user space
 * @msg: message from user space
 * @size: size of message from user space
 * @ivsize: the size of the IV for the cipher operation to verify that the
 * user-space-provided IV has the right size
 * Return: the number of copied data upon success, < 0 upon error
 */
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
		   unsigned int ivsize)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = false;
	bool init = false;
	int err = 0;

	/* Parse SOL_ALG control messages (op, IV, AEAD assoclen), if any. */
	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = true;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = true;
			break;
		case ALG_OP_DECRYPT:
			enc = false;
			break;
		default:
			return -EINVAL;
		}

		/* User-provided IV must match the cipher's IV size. */
		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	/* A finished (!more) request must be consumed before new data. */
	if (ctx->init && !ctx->more) {
		if (ctx->used) {
			err = -EINVAL;
			goto unlock;
		}

		pr_info_once(
			"%s sent an empty control message without MSG_MORE.\n",
			current->comm);
	}
	ctx->init = true;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		struct scatterlist *sg;
		size_t len = size;
		size_t plen;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct af_alg_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			/* Fill only up to the end of the current page. */
			len = min_t(size_t, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* Non-zero iff the page still has room to merge. */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!af_alg_writable(sk)) {
			err = af_alg_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, len, af_alg_sndbuf(sk));

		err = af_alg_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);

		/* Fill page-sized chunks until len or the SGL is exhausted. */
		do {
			struct page *pg;
			unsigned int i = sgl->cur;

			plen = min_t(size_t, len, PAGE_SIZE);

			pg = alloc_page(GFP_KERNEL);
			if (!pg) {
				err = -ENOMEM;
				goto unlock;
			}

			sg_assign_page(sg + i, pg);

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		/* Allow merging into the last page if it is not full. */
		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	/* Report partial progress even when an error occurred late. */
	return copied ?: err;
}
EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
 * af_alg_sendpage - sendpage system call handler
 * @sock: socket of connection to user space to write to
 * @page: data to send
 * @offset: offset into page to begin sending
 * @size: length of data
 * @flags: message send/receive flags
 *
 * This is a generic implementation of sendpage to fill ctx->tsgl_list.
 * The page is referenced (get_page) and linked zero-copy into the TX
 * SGL; no merging into previously queued pages is attempted.
 */
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
			int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* A finished request must be consumed before queueing new data. */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!af_alg_writable(sk)) {
		err = af_alg_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = af_alg_alloc_tsgl(sk);
	if (err)
		goto unlock;

	/* Zero-copy entry: disable byte-merging into this page. */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	af_alg_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
EXPORT_SYMBOL_GPL(af_alg_sendpage);
  850. /**
  851. * af_alg_free_resources - release resources required for crypto request
  852. * @areq: Request holding the TX and RX SGL
  853. */
  854. void af_alg_free_resources(struct af_alg_async_req *areq)
  855. {
  856. struct sock *sk = areq->sk;
  857. af_alg_free_areq_sgls(areq);
  858. sock_kfree_s(sk, areq, areq->areqlen);
  859. }
  860. EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
 * af_alg_async_cb - AIO callback handler
 * @_req: async request info
 * @err: if non-zero, error result to be returned via ki_complete();
 * otherwise return the AIO output length via ki_complete().
 *
 * This handler cleans up the struct af_alg_async_req upon completion of the
 * AIO operation.
 *
 * The number of bytes to be generated with the AIO operation must be set
 * in areq->outlen before the AIO callback handler is invoked.
 */
void af_alg_async_cb(struct crypto_async_request *_req, int err)
{
	struct af_alg_async_req *areq = _req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	/* Buffer size written by crypto operation. */
	resultlen = areq->outlen;

	/* Free areq (and read iocb/outlen) before dropping the socket ref. */
	af_alg_free_resources(areq);
	sock_put(sk);

	iocb->ki_complete(iocb, err ? err : (int)resultlen);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
  886. /**
  887. * af_alg_poll - poll system call handler
  888. * @file: file pointer
  889. * @sock: socket to poll
  890. * @wait: poll_table
  891. */
  892. __poll_t af_alg_poll(struct file *file, struct socket *sock,
  893. poll_table *wait)
  894. {
  895. struct sock *sk = sock->sk;
  896. struct alg_sock *ask = alg_sk(sk);
  897. struct af_alg_ctx *ctx = ask->private;
  898. __poll_t mask;
  899. sock_poll_wait(file, sock, wait);
  900. mask = 0;
  901. if (!ctx->more || ctx->used)
  902. mask |= EPOLLIN | EPOLLRDNORM;
  903. if (af_alg_writable(sk))
  904. mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
  905. return mask;
  906. }
  907. EXPORT_SYMBOL_GPL(af_alg_poll);
  908. /**
  909. * af_alg_alloc_areq - allocate struct af_alg_async_req
  910. *
  911. * @sk: socket of connection to user space
  912. * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize
  913. * Return: allocated data structure or ERR_PTR upon error
  914. */
  915. struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
  916. unsigned int areqlen)
  917. {
  918. struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
  919. if (unlikely(!areq))
  920. return ERR_PTR(-ENOMEM);
  921. areq->areqlen = areqlen;
  922. areq->sk = sk;
  923. areq->last_rsgl = NULL;
  924. INIT_LIST_HEAD(&areq->rsgl_list);
  925. areq->tsgl = NULL;
  926. areq->tsgl_entries = 0;
  927. return areq;
  928. }
  929. EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
/**
 * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
 *		     operation
 *
 * @sk: socket of connection to user space
 * @msg: user space message
 * @flags: flags used to invoke recvmsg with (currently unused in this body)
 * @areq: instance of the cryptographic request that will hold the RX SGL
 * @maxsize: maximum number of bytes to be pulled from user space
 * @outlen: number of bytes in the RX SGL
 * Return: 0 on success, < 0 upon error
 *
 * On error the partially built RX SGL stays linked into @areq->rsgl_list;
 * the caller is expected to release it via af_alg_free_resources().
 */
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
		    struct af_alg_async_req *areq, size_t maxsize,
		    size_t *outlen)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	size_t len = 0;

	/* One iteration per iovec segment, until maxsize or msg is drained. */
	while (maxsize > len && msg_data_left(msg)) {
		struct af_alg_rsgl *rsgl;
		size_t seglen;
		int err;

		/* limit the amount of readable buffers */
		if (!af_alg_readable(sk))
			break;

		/* Never pull more than the remaining budget or message. */
		seglen = min_t(size_t, (maxsize - len),
			       msg_data_left(msg));

		/*
		 * The first SGL slot is embedded in areq; further slots are
		 * allocated on demand.
		 */
		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl))
				return -ENOMEM;
		}

		rsgl->sgl.npages = 0;
		/*
		 * Link before af_alg_make_sg() so that a failure below still
		 * leaves rsgl reachable for the caller's cleanup.
		 */
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0) {
			/* No bytes mapped for this entry; keep cleanup sane. */
			rsgl->sg_num_bytes = 0;
			return err;
		}

		/* chain the new scatterlist with previous one */
		if (areq->last_rsgl)
			af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);

		areq->last_rsgl = rsgl;
		len += err;
		/* Account the mapped bytes against the RX memory limit. */
		atomic_add(err, &ctx->rcvused);
		rsgl->sg_num_bytes = err;
	}

	*outlen = len;
	return 0;
}
EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
  985. static int __init af_alg_init(void)
  986. {
  987. int err = proto_register(&alg_proto, 0);
  988. if (err)
  989. goto out;
  990. err = sock_register(&alg_family);
  991. if (err != 0)
  992. goto out_unregister_proto;
  993. out:
  994. return err;
  995. out_unregister_proto:
  996. proto_unregister(&alg_proto);
  997. goto out;
  998. }
/* Tear down in reverse order of af_alg_init(): family first, then proto. */
static void __exit af_alg_exit(void)
{
	sock_unregister(PF_ALG);
	proto_unregister(&alg_proto);
}
/* Module plumbing: entry/exit points, license, and AF_ALG netproto alias. */
module_init(af_alg_init);
module_exit(af_alg_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_ALG);