af_qmsgq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/cred.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "af_qmsgq.h"
#ifndef AF_QMSGQ
#define AF_QMSGQ 27
#endif

#ifndef PF_QMSGQ
#define PF_QMSGQ AF_QMSGQ
#endif
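
/* Per-skb control block, stored in skb->cb: carries the source and
 * destination CID/port of a datagram from qmsgq_post() to recvmsg().
 */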
struct qmsgq_cb {
	u32 src_cid;
	u32 src_port;
	u32 dst_cid;
	u32 dst_port;
};
static const struct qmsgq_endpoint *registered_ep;
static DEFINE_MUTEX(qmsgq_register_mutex);

/* auto-bind range */
#define QMSGQ_MIN_EPH_SOCKET 0x4000
#define QMSGQ_MAX_EPH_SOCKET 0x7fff
#define QMSGQ_EPH_PORT_RANGE \
		XA_LIMIT(QMSGQ_MIN_EPH_SOCKET, QMSGQ_MAX_EPH_SOCKET)

/* local port allocation management */
static DEFINE_XARRAY_ALLOC(qmsgq_ports);
static u32 qmsgq_ports_next = QMSGQ_MIN_EPH_SOCKET;
static DEFINE_SPINLOCK(qmsgq_port_lock);
/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define QMSGQ_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
#define QMSGQ_DEFAULT_BUFFER_SIZE (1024 * 256)
#define QMSGQ_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define QMSGQ_DEFAULT_BUFFER_MIN_SIZE 128
static void qmsgq_deassign_ep(struct qmsgq_sock *qsk)
{
	if (!qsk->ep)
		return;

	qsk->ep->destruct(qsk);
	module_put(qsk->ep->module);
	qsk->ep = NULL;
}
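
/* Assign the (single) registered endpoint to @qsk, taking a reference on the
 * endpoint's module and running its init() callback. @psk is the listening
 * parent socket, or NULL when there is none. Any previously assigned
 * endpoint is released first.
 */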
int qmsgq_assign_ep(struct qmsgq_sock *qsk, struct qmsgq_sock *psk)
{
	const struct qmsgq_endpoint *new_ep;
	int ret;

	new_ep = registered_ep;
	if (qsk->ep) {
		if (qsk->ep == new_ep)
			return 0;

		qsk->ep->release(qsk);
		qmsgq_deassign_ep(qsk);
	}

	/* We increase the module refcnt to prevent the transport unloading
	 * while there are open sockets assigned to it.
	 */
	if (!new_ep || !try_module_get(new_ep->module))
		return -ENODEV;

	ret = new_ep->init(qsk, psk);
	if (ret) {
		module_put(new_ep->module);
		return ret;
	}

	qsk->ep = new_ep;
	return 0;
}
static bool qmsgq_find_cid(unsigned int cid)
{
	if (registered_ep && cid == registered_ep->get_local_cid())
		return true;

	return false;
}

static bool sock_type_connectible(u16 type)
{
	return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}
static struct qmsgq_sock *qmsgq_port_lookup(int port)
{
	struct qmsgq_sock *qsk;
	unsigned long flags;

	spin_lock_irqsave(&qmsgq_port_lock, flags);
	qsk = xa_load(&qmsgq_ports, port);
	if (qsk)
		sock_hold(qsk_sk(qsk));
	spin_unlock_irqrestore(&qmsgq_port_lock, flags);

	return qsk;
}

static void qmsgq_port_put(struct qmsgq_sock *qsk)
{
	sock_put(qsk_sk(qsk));
}

static void qmsgq_port_remove(struct qmsgq_sock *qsk)
{
	int port = qsk->local_addr.svm_port;
	unsigned long flags;

	__sock_put(qsk_sk(qsk));

	spin_lock_irqsave(&qmsgq_port_lock, flags);
	xa_erase(&qmsgq_ports, port);
	spin_unlock_irqrestore(&qmsgq_port_lock, flags);
}
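
/* Assign a local port to @qsk. A port of 0 (or a negative value) requests an
 * ephemeral port from the auto-bind range; binding below
 * QMSGQ_MIN_EPH_SOCKET requires CAP_NET_ADMIN. On success a reference on the
 * socket is held for the port table. Caller holds qmsgq_port_lock.
 */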
static int qmsgq_port_assign(struct qmsgq_sock *qsk, int *port)
{
	int rc;

	if (!*port || *port < 0) {
		rc = xa_alloc_cyclic(&qmsgq_ports, port, qsk,
				     QMSGQ_EPH_PORT_RANGE, &qmsgq_ports_next,
				     GFP_ATOMIC);
	} else if (*port < QMSGQ_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else {
		rc = xa_insert(&qmsgq_ports, *port, qsk, GFP_ATOMIC);
	}

	if (rc == -EBUSY)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(qsk_sk(qsk));
	return 0;
}
static int qmsgq_send_shutdown(struct sock *sk, int mode)
{
	struct qmsgq_sock *qsk = sk_qsk(sk);

	if (!qsk->ep)
		return -ENODEV;

	return qsk->ep->shutdown(qsk, mode);
}

/* Placeholder work handlers; connection-oriented support is not implemented
 * yet (see the TODO in qmsgq_poll()).
 */
static void qmsgq_connect_timeout(struct work_struct *work)
{
}

static void qmsgq_pending_work(struct work_struct *work)
{
}
/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qmsgq_bind(struct socket *sock,
			const struct sockaddr_vm *addr, int zapped)
{
	struct qmsgq_sock *qsk = sk_qsk(sock->sk);
	struct sock *sk = sock->sk;
	unsigned long flags;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->svm_port == qsk->local_addr.svm_port)
		return 0;

	if (addr->svm_cid != VMADDR_CID_ANY && !qmsgq_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	spin_lock_irqsave(&qmsgq_port_lock, flags);
	port = addr->svm_port;
	rc = qmsgq_port_assign(qsk, &port);
	spin_unlock_irqrestore(&qmsgq_port_lock, flags);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qmsgq_port_remove(qsk);

	vsock_addr_init(&qsk->local_addr, VMADDR_CID_HOST, port);
	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}
/* Auto bind to an ephemeral port. */
static int qmsgq_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_vm addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	vsock_addr_init(&addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __qmsgq_bind(sock, &addr, 1);
}

static int qmsgq_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sockaddr_vm *vm_addr;
	struct sock *sk = sock->sk;
	int rc;

	if (vsock_addr_cast(addr, len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	rc = __qmsgq_bind(sock, vm_addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}
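
/* connect() on a datagram socket pins a default remote address for send()
 * and recv(). Connecting with AF_UNSPEC dissolves the association, as it
 * does for other datagram families.
 */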
static int qmsgq_dgram_connect(struct socket *sock, struct sockaddr *addr, int addr_len, int flags)
{
	struct sockaddr_vm *remote_addr;
	struct qmsgq_sock *qsk;
	struct sock *sk;
	int rc;

	sk = sock->sk;
	qsk = sk_qsk(sk);

	rc = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (rc == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&qsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (rc != 0) {
		return -EINVAL;
	}

	lock_sock(sk);
	rc = qmsgq_autobind(sock);
	if (rc)
		goto out;

	if (!qsk->ep->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) {
		rc = -EINVAL;
		goto out;
	}

	memcpy(&qsk->remote_addr, remote_addr, sizeof(qsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return rc;
}
static int qmsgq_getname(struct socket *sock, struct sockaddr *addr, int peer)
{
	struct sockaddr_vm *vm_addr = NULL;
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	int rc = 0;

	qsk = sk_qsk(sk);

	lock_sock(sk);
	if (peer) {
		if (sock->state != SS_CONNECTED) {
			rc = -ENOTCONN;
			goto out;
		}
		vm_addr = &qsk->remote_addr;
	} else {
		vm_addr = &qsk->local_addr;
	}

	if (!vm_addr) {
		rc = -EINVAL;
		goto out;
	}

	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	rc = sizeof(*vm_addr);

out:
	release_sock(sk);
	return rc;
}
static int qmsgq_shutdown(struct socket *sock, int mode)
{
	struct sock *sk;
	int rc;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do. Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;
	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a connection oriented socket and it is not connected then
	 * bail out immediately. If it is a DGRAM socket then we must first
	 * kick the socket so that it wakes up from any sleeping calls, for
	 * example recv(), and then afterwards return the error.
	 */
	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		rc = -ENOTCONN;
		if (sock_type_connectible(sk->sk_type))
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		rc = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sock_type_connectible(sk->sk_type)) {
			sock_reset_flag(sk, SOCK_DONE);
			qmsgq_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return rc;
}
static __poll_t qmsgq_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	__poll_t mask;

	qsk = sk_qsk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (qsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    qsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	} /* TODO Connected POLL */

	return mask;
}
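
/* Transmit path: resolve the destination (an explicit msg_name or the
 * connected remote), validate it against the endpoint, then hand the
 * message to the endpoint's dgram_enqueue() callback.
 */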
static int qmsgq_dgram_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	const struct qmsgq_endpoint *ep;
	struct sockaddr_vm *remote_addr;
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	int rc = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	qsk = sk_qsk(sk);

	lock_sock(sk);
	ep = qsk->ep;

	rc = qmsgq_autobind(sock);
	if (rc)
		goto out;

	if (msg->msg_name) {
		rc = vsock_addr_cast(msg->msg_name, msg->msg_namelen, &remote_addr);
		if (rc)
			goto out;
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &qsk->remote_addr;
	} else {
		rc = -EINVAL;
		goto out;
	}

	if (remote_addr->svm_cid == VMADDR_CID_ANY)
		remote_addr->svm_cid = ep->get_local_cid();

	if (!vsock_addr_bound(remote_addr)) {
		rc = -EINVAL;
		goto out;
	}

	if (!ep->dgram_allow(remote_addr->svm_cid, remote_addr->svm_port)) {
		rc = -EINVAL;
		goto out;
	}

	rc = ep->dgram_enqueue(qsk, remote_addr, msg, len);
	if (!rc)
		rc = len;

out:
	release_sock(sk);
	return rc;
}
static int qmsgq_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct qmsgq_sock *qsk;
	struct sk_buff *skb;
	struct qmsgq_cb *cb;
	int copied;
	int rc = 0;

	qsk = sk_qsk(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		pr_err("%s: socket is not bound\n", __func__);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	lock_sock(sk);

	cb = (struct qmsgq_cb *)skb->cb;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/* Place the datagram payload in the user's iovec. */
	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0) {
		pr_err("%s: skb_copy_datagram_msg failed: %d\n", __func__, rc);
		goto out;
	}
	rc = copied;

	if (vm_addr) {
		vsock_addr_init(vm_addr, VMADDR_CID_HOST, cb->src_port);
		msg->msg_namelen = sizeof(*vm_addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);
	return rc;
}
static void __qmsgq_release(struct sock *sk, int level)
{
	if (sk) {
		struct qmsgq_sock *qsk = sk_qsk(sk);

		lock_sock_nested(sk, level);
		if (qsk->ep)
			qsk->ep->release(qsk);

		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);

		if (!sock_flag(sk, SOCK_ZAPPED))
			qmsgq_port_remove(qsk);

		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;
		skb_queue_purge(&sk->sk_receive_queue);

		release_sock(sk);
		sock_put(sk);
	}
}

static int qmsgq_release(struct socket *sock)
{
	__qmsgq_release(sock->sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}
static const struct proto_ops qmsgq_dgram_ops = {
	.owner = THIS_MODULE,
	.family = AF_QMSGQ,
	.release = qmsgq_release,
	.bind = qmsgq_bind,
	.connect = qmsgq_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = qmsgq_getname,
	.poll = qmsgq_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = qmsgq_shutdown,
	.sendmsg = qmsgq_dgram_sendmsg,
	.recvmsg = qmsgq_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct proto qmsgq_proto = {
	.name = "QMSGQ",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct qmsgq_sock),
};
static void sk_qsk_destruct(struct sock *sk)
{
	struct qmsgq_sock *qsk = sk_qsk(sk);

	qmsgq_deassign_ep(qsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&qsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&qsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(qsk->owner);
}

static int qmsgq_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}
static struct sock *__qmsgq_create(struct net *net, struct socket *sock, struct sock *parent,
				   gfp_t priority, unsigned short type, int kern)
{
	struct qmsgq_sock *psk;
	struct qmsgq_sock *qsk;
	struct sock *sk;

	sk = sk_alloc(net, AF_QMSGQ, priority, &qmsgq_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set by sock_init_data(), but only when sock
	 * is non-NULL; make sure our sockets always have a type.
	 */
	if (!sock)
		sk->sk_type = type;

	qsk = sk_qsk(sk);
	vsock_addr_init(&qsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&qsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = sk_qsk_destruct;
	sk->sk_backlog_rcv = qmsgq_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);
	sock_set_flag(sk, SOCK_ZAPPED);

	INIT_LIST_HEAD(&qsk->bound_table);
	INIT_LIST_HEAD(&qsk->connected_table);
	qsk->listener = NULL;
	INIT_LIST_HEAD(&qsk->pending_links);
	INIT_LIST_HEAD(&qsk->accept_queue);
	qsk->rejected = false;
	qsk->sent_request = false;
	qsk->ignore_connecting_rst = false;
	qsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&qsk->connect_work, qmsgq_connect_timeout);
	INIT_DELAYED_WORK(&qsk->pending_work, qmsgq_pending_work);

	psk = parent ? sk_qsk(parent) : NULL;
	if (parent) {
		qsk->trusted = psk->trusted;
		qsk->owner = get_cred(psk->owner);
		qsk->connect_timeout = psk->connect_timeout;
		qsk->buffer_size = psk->buffer_size;
		qsk->buffer_min_size = psk->buffer_min_size;
		qsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		qsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		qsk->owner = get_current_cred();
		qsk->connect_timeout = QMSGQ_DEFAULT_CONNECT_TIMEOUT;
		qsk->buffer_size = QMSGQ_DEFAULT_BUFFER_SIZE;
		qsk->buffer_min_size = QMSGQ_DEFAULT_BUFFER_MIN_SIZE;
		qsk->buffer_max_size = QMSGQ_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}
static int qmsgq_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	struct qmsgq_sock *qsk;
	struct sock *sk;
	int rc;

	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_QMSGQ)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &qmsgq_dgram_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	sk = __qmsgq_create(net, sock, NULL, GFP_KERNEL, 0, kern);
	if (!sk)
		return -ENOMEM;

	qsk = sk_qsk(sk);

	if (sock->type == SOCK_DGRAM) {
		rc = qmsgq_assign_ep(qsk, NULL);
		if (rc < 0) {
			sock_put(sk);
			return rc;
		}
	}

	return 0;
}
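
/* qmsgq_post() - deliver an inbound datagram to a bound local socket.
 *
 * Called by the registered endpoint when data arrives for @dst. The payload
 * is copied into a fresh skb, stamped with the addressing info in its
 * control block, and queued on the destination socket's receive queue.
 */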
int qmsgq_post(const struct qmsgq_endpoint *ep, struct sockaddr_vm *src, struct sockaddr_vm *dst,
	       void *data, int len)
{
	struct qmsgq_sock *qsk;
	struct qmsgq_cb *cb;
	struct sk_buff *skb;
	int rc;

	skb = alloc_skb_with_frags(0, len, 0, &rc, GFP_KERNEL);
	if (!skb) {
		pr_err("%s: Unable to get skb with len:%d\n", __func__, len);
		return -ENOMEM;
	}

	cb = (struct qmsgq_cb *)skb->cb;
	cb->src_cid = src->svm_cid;
	cb->src_port = src->svm_port;
	cb->dst_cid = dst->svm_cid;
	cb->dst_port = dst->svm_port;

	skb->data_len = len;
	skb->len = len;
	skb_store_bits(skb, 0, data, len);

	qsk = qmsgq_port_lookup(dst->svm_port);
	if (!qsk || qsk->ep != ep) {
		pr_err("%s: invalid dst port:%d\n", __func__, dst->svm_port);
		/* drop the reference taken by qmsgq_port_lookup() */
		if (qsk)
			qmsgq_port_put(qsk);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (sock_queue_rcv_skb(qsk_sk(qsk), skb)) {
		pr_err("%s: sock_queue_rcv_skb failed\n", __func__);
		qmsgq_port_put(qsk);
		kfree_skb(skb);
		return -EINVAL;
	}

	qmsgq_port_put(qsk);

	return 0;
}
EXPORT_SYMBOL(qmsgq_post);
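
/* qmsgq_endpoint_register() - register @ep as the transport backend.
 *
 * Only a single endpoint may be registered at a time; further callers get
 * -EBUSY until the current endpoint unregisters.
 */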
int qmsgq_endpoint_register(const struct qmsgq_endpoint *ep)
{
	int rc = 0;

	if (!ep)
		return -EINVAL;

	mutex_lock(&qmsgq_register_mutex);
	if (registered_ep) {
		rc = -EBUSY;
		goto error;
	}
	registered_ep = ep;

error:
	mutex_unlock(&qmsgq_register_mutex);
	return rc;
}
EXPORT_SYMBOL(qmsgq_endpoint_register);
void qmsgq_endpoint_unregister(const struct qmsgq_endpoint *ep)
{
	mutex_lock(&qmsgq_register_mutex);
	if (registered_ep == ep)
		registered_ep = NULL;
	mutex_unlock(&qmsgq_register_mutex);
}
EXPORT_SYMBOL(qmsgq_endpoint_unregister);
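
/* Usage sketch (not part of this file): a transport module would provide a
 * qmsgq_endpoint and register it at probe time. The callback names below are
 * hypothetical; the fields shown are the ones this file invokes, and their
 * exact signatures live in af_qmsgq.h.
 *
 *	static const struct qmsgq_endpoint my_ep = {
 *		.module = THIS_MODULE,
 *		.init = my_sock_init,
 *		.destruct = my_sock_destruct,
 *		.release = my_sock_release,
 *		.shutdown = my_sock_shutdown,
 *		.dgram_allow = my_dgram_allow,
 *		.dgram_enqueue = my_dgram_enqueue,
 *		.get_local_cid = my_get_local_cid,
 *	};
 *
 *	rc = qmsgq_endpoint_register(&my_ep);
 *
 * Inbound data is then handed to the socket layer with:
 *
 *	qmsgq_post(&my_ep, &src, &dst, buf, len);
 *
 * and the endpoint is removed with qmsgq_endpoint_unregister(&my_ep) before
 * module unload.
 */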
static const struct net_proto_family qmsgq_family = {
	.owner = THIS_MODULE,
	.family = AF_QMSGQ,
	.create = qmsgq_create,
};
static int __init qmsgq_proto_init(void)
{
	int rc;

	registered_ep = NULL;

	rc = proto_register(&qmsgq_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qmsgq_family);
	if (rc)
		goto err_proto;

	return 0;

err_proto:
	proto_unregister(&qmsgq_proto);
	return rc;
}
static void __exit qmsgq_proto_fini(void)
{
	sock_unregister(qmsgq_family.family);
	proto_unregister(&qmsgq_proto);
}
module_init(qmsgq_proto_init);
module_exit(qmsgq_proto_fini);

MODULE_DESCRIPTION("QTI Gunyah MSGQ Socket driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_QMSGQ);