  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * File: pep.c
  4. *
  5. * Phonet pipe protocol end point socket
  6. *
  7. * Copyright (C) 2008 Nokia Corporation.
  8. *
  9. * Author: Rémi Denis-Courmont
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/sched/signal.h>
  13. #include <linux/slab.h>
  14. #include <linux/socket.h>
  15. #include <net/sock.h>
  16. #include <net/tcp_states.h>
  17. #include <asm/ioctls.h>
  18. #include <linux/phonet.h>
  19. #include <linux/module.h>
  20. #include <net/phonet/phonet.h>
  21. #include <net/phonet/pep.h>
  22. #include <net/phonet/gprs.h>
  23. /* sk_state values:
  24. * TCP_CLOSE sock not in use yet
  25. * TCP_CLOSE_WAIT disconnected pipe
  26. * TCP_LISTEN listening pipe endpoint
  27. * TCP_SYN_RECV connected pipe in disabled state
  28. * TCP_ESTABLISHED connected pipe in enabled state
  29. *
  30. * pep_sock locking:
  31. * - sk_state, hlist: sock lock needed
  32. * - listener: read only
  33. * - pipe_handle: read only
  34. */
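/*
 * Typical state transitions implemented below:
 *   TCP_CLOSE       -> TCP_SYN_SENT     pep_sock_connect()
 *   TCP_SYN_SENT    -> TCP_SYN_RECV     connect response, pipe disabled
 *   TCP_SYN_SENT    -> TCP_ESTABLISHED  connect/enable response, pipe enabled
 *   TCP_SYN_RECV    -> TCP_SYN_SENT     pep_sock_enable() via SIOCPNENABLEPIPE
 *   TCP_SYN_RECV    -> TCP_ESTABLISHED  PNS_PIPE_ENABLED_IND
 *   TCP_ESTABLISHED -> TCP_SYN_RECV     PNS_PIPE_DISABLED_IND
 *   connected       -> TCP_CLOSE_WAIT   disconnect request or failed response
 * Accepted sockets start in TCP_SYN_RECV (see pep_sock_accept()).
 */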
  35. #define CREDITS_MAX 10
  36. #define CREDITS_THR 7
  37. #define pep_sb_size(s) (((s) + 5) & ~3) /* 2-byte header, 32-bit aligned */
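/*
 * For example, the two sizes used below: pep_sb_size(2) = (2 + 5) & ~3 = 4
 * and pep_sb_size(5) = (5 + 5) & ~3 = 8, i.e. the payload plus the 2-byte
 * sub-block header, rounded up to the next 32-bit boundary.
 */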
  38. /* Get the next TLV sub-block. */
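/*
 * A sub-block is laid out on the wire as | type (1 byte) | len (1 byte) |
 * data |, where len covers the whole sub-block including this 2-byte
 * type/len header.  On success *ptype holds the sub-block type, *plen the
 * data length (len - 2), at most the caller-supplied *plen bytes of data
 * are copied into buf, and the whole sub-block is pulled off the skb.
 */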
  39. static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
  40. void *buf)
  41. {
  42. void *data = NULL;
  43. struct {
  44. u8 sb_type;
  45. u8 sb_len;
  46. } *ph, h;
  47. int buflen = *plen;
  48. ph = skb_header_pointer(skb, 0, 2, &h);
  49. if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
  50. return NULL;
  51. ph->sb_len -= 2;
  52. *ptype = ph->sb_type;
  53. *plen = ph->sb_len;
  54. if (buflen > ph->sb_len)
  55. buflen = ph->sb_len;
  56. data = skb_header_pointer(skb, 2, buflen, buf);
  57. __skb_pull(skb, 2 + ph->sb_len);
  58. return data;
  59. }
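/*
 * Allocate an skb for an outgoing pipe message: reserve MAX_PNPIPE_HEADER
 * bytes of headroom, copy the payload, then push a struct pnpipehdr in
 * front of it.  Callers fill in the header fields and hand the skb to
 * pn_skb_send().
 */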
  60. static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
  61. int len, gfp_t priority)
  62. {
  63. struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
  64. if (!skb)
  65. return NULL;
  66. skb_set_owner_w(skb, sk);
  67. skb_reserve(skb, MAX_PNPIPE_HEADER);
  68. __skb_put(skb, len);
  69. skb_copy_to_linear_data(skb, payload, len);
  70. __skb_push(skb, sizeof(struct pnpipehdr));
  71. skb_reset_transport_header(skb);
  72. return skb;
  73. }
  74. static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
  75. const void *data, int len, gfp_t priority)
  76. {
  77. const struct pnpipehdr *oph = pnp_hdr(oskb);
  78. struct pnpipehdr *ph;
  79. struct sk_buff *skb;
  80. struct sockaddr_pn peer;
  81. skb = pep_alloc_skb(sk, data, len, priority);
  82. if (!skb)
  83. return -ENOMEM;
  84. ph = pnp_hdr(skb);
  85. ph->utid = oph->utid;
  86. ph->message_id = oph->message_id + 1; /* REQ -> RESP */
  87. ph->pipe_handle = oph->pipe_handle;
  88. ph->error_code = code;
  89. pn_skb_get_src_sockaddr(oskb, &peer);
  90. return pn_skb_send(sk, skb, &peer);
  91. }
  92. static int pep_indicate(struct sock *sk, u8 id, u8 code,
  93. const void *data, int len, gfp_t priority)
  94. {
  95. struct pep_sock *pn = pep_sk(sk);
  96. struct pnpipehdr *ph;
  97. struct sk_buff *skb;
  98. skb = pep_alloc_skb(sk, data, len, priority);
  99. if (!skb)
  100. return -ENOMEM;
  101. ph = pnp_hdr(skb);
  102. ph->utid = 0;
  103. ph->message_id = id;
  104. ph->pipe_handle = pn->pipe_handle;
  105. ph->error_code = code;
  106. return pn_skb_send(sk, skb, NULL);
  107. }
  108. #define PAD 0x00
  109. static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
  110. const void *data, int len)
  111. {
  112. struct pep_sock *pn = pep_sk(sk);
  113. struct pnpipehdr *ph;
  114. struct sk_buff *skb;
  115. skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
  116. if (!skb)
  117. return -ENOMEM;
  118. ph = pnp_hdr(skb);
  119. ph->utid = id; /* any value will do */
  120. ph->message_id = id;
  121. ph->pipe_handle = pn->pipe_handle;
  122. ph->error_code = code;
  123. return pn_skb_send(sk, skb, NULL);
  124. }
  125. static int pipe_handler_send_created_ind(struct sock *sk)
  126. {
  127. struct pep_sock *pn = pep_sk(sk);
  128. u8 data[4] = {
  129. PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
  130. pn->tx_fc, pn->rx_fc,
  131. };
  132. return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
  133. data, 4, GFP_ATOMIC);
  134. }
  135. static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
  136. {
  137. static const u8 data[20] = {
  138. PAD, PAD, PAD, 2 /* sub-blocks */,
  139. PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
  140. PN_MULTI_CREDIT_FLOW_CONTROL,
  141. PN_ONE_CREDIT_FLOW_CONTROL,
  142. PN_LEGACY_FLOW_CONTROL,
  143. PAD,
  144. PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
  145. PN_MULTI_CREDIT_FLOW_CONTROL,
  146. PN_ONE_CREDIT_FLOW_CONTROL,
  147. PN_LEGACY_FLOW_CONTROL,
  148. PAD,
  149. };
  150. might_sleep();
  151. return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
  152. GFP_KERNEL);
  153. }
  154. static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
  155. gfp_t priority)
  156. {
  157. static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
  158. WARN_ON(code == PN_PIPE_NO_ERROR);
  159. return pep_reply(sk, skb, code, data, sizeof(data), priority);
  160. }
  161. /* Control requests are not sent by the pipe service and have a specific
  162. * message format. */
  163. static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
  164. gfp_t priority)
  165. {
  166. const struct pnpipehdr *oph = pnp_hdr(oskb);
  167. struct sk_buff *skb;
  168. struct pnpipehdr *ph;
  169. struct sockaddr_pn dst;
  170. u8 data[4] = {
  171. oph->pep_type, /* PEP type */
  172. code, /* error code, at an unusual offset */
  173. PAD, PAD,
  174. };
  175. skb = pep_alloc_skb(sk, data, 4, priority);
  176. if (!skb)
  177. return -ENOMEM;
  178. ph = pnp_hdr(skb);
  179. ph->utid = oph->utid;
  180. ph->message_id = PNS_PEP_CTRL_RESP;
  181. ph->pipe_handle = oph->pipe_handle;
  182. ph->data0 = oph->data[0]; /* CTRL id */
  183. pn_skb_get_src_sockaddr(oskb, &dst);
  184. return pn_skb_send(sk, skb, &dst);
  185. }
  186. static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
  187. {
  188. u8 data[4] = { type, PAD, PAD, status };
  189. return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
  190. data, 4, priority);
  191. }
  192. /* Send our RX flow control information to the sender.
  193. * Socket must be locked. */
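/*
 * Example with the constants above: in multi-credit mode new credits are
 * only granted once rx_credits has dropped to CREDITS_MAX - CREDITS_THR
 * (i.e. 3) or below, and the grant of CREDITS_MAX - rx_credits brings the
 * total back to CREDITS_MAX.  In one-credit mode a single credit is
 * re-armed by sending a PEP_IND_READY status indication.
 */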
  194. static void pipe_grant_credits(struct sock *sk, gfp_t priority)
  195. {
  196. struct pep_sock *pn = pep_sk(sk);
  197. BUG_ON(sk->sk_state != TCP_ESTABLISHED);
  198. switch (pn->rx_fc) {
  199. case PN_LEGACY_FLOW_CONTROL: /* TODO */
  200. break;
  201. case PN_ONE_CREDIT_FLOW_CONTROL:
  202. if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
  203. PEP_IND_READY, priority) == 0)
  204. pn->rx_credits = 1;
  205. break;
  206. case PN_MULTI_CREDIT_FLOW_CONTROL:
  207. if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
  208. break;
  209. if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
  210. CREDITS_MAX - pn->rx_credits,
  211. priority) == 0)
  212. pn->rx_credits = CREDITS_MAX;
  213. break;
  214. }
  215. }
  216. static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
  217. {
  218. struct pep_sock *pn = pep_sk(sk);
  219. struct pnpipehdr *hdr;
  220. int wake = 0;
  221. if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
  222. return -EINVAL;
  223. hdr = pnp_hdr(skb);
  224. if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
  225. net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
  226. (unsigned int)hdr->pep_type);
  227. return -EOPNOTSUPP;
  228. }
  229. switch (hdr->data[0]) {
  230. case PN_PEP_IND_FLOW_CONTROL:
  231. switch (pn->tx_fc) {
  232. case PN_LEGACY_FLOW_CONTROL:
  233. switch (hdr->data[3]) {
  234. case PEP_IND_BUSY:
  235. atomic_set(&pn->tx_credits, 0);
  236. break;
  237. case PEP_IND_READY:
  238. atomic_set(&pn->tx_credits, wake = 1);
  239. break;
  240. }
  241. break;
  242. case PN_ONE_CREDIT_FLOW_CONTROL:
  243. if (hdr->data[3] == PEP_IND_READY)
  244. atomic_set(&pn->tx_credits, wake = 1);
  245. break;
  246. }
  247. break;
  248. case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
  249. if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
  250. break;
  251. atomic_add(wake = hdr->data[3], &pn->tx_credits);
  252. break;
  253. default:
  254. net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
  255. (unsigned int)hdr->data[0]);
  256. return -EOPNOTSUPP;
  257. }
  258. if (wake)
  259. sk->sk_write_space(sk);
  260. return 0;
  261. }
  262. static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
  263. {
  264. struct pep_sock *pn = pep_sk(sk);
  265. struct pnpipehdr *hdr = pnp_hdr(skb);
  266. u8 n_sb = hdr->data0;
  267. pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
  268. __skb_pull(skb, sizeof(*hdr));
  269. while (n_sb > 0) {
  270. u8 type, buf[2], len = sizeof(buf);
  271. u8 *data = pep_get_sb(skb, &type, &len, buf);
  272. if (data == NULL)
  273. return -EINVAL;
  274. switch (type) {
  275. case PN_PIPE_SB_NEGOTIATED_FC:
  276. if (len < 2 || (data[0] | data[1]) > 3)
  277. break;
  278. pn->tx_fc = data[0] & 3;
  279. pn->rx_fc = data[1] & 3;
  280. break;
  281. }
  282. n_sb--;
  283. }
  284. return 0;
  285. }
  286. /* Queue an skb to a connected sock.
  287. * Socket lock must be held. */
  288. static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
  289. {
  290. struct pep_sock *pn = pep_sk(sk);
  291. struct pnpipehdr *hdr = pnp_hdr(skb);
  292. struct sk_buff_head *queue;
  293. int err = 0;
  294. BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
  295. switch (hdr->message_id) {
  296. case PNS_PEP_CONNECT_REQ:
  297. pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
  298. break;
  299. case PNS_PEP_DISCONNECT_REQ:
  300. pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
  301. sk->sk_state = TCP_CLOSE_WAIT;
  302. if (!sock_flag(sk, SOCK_DEAD))
  303. sk->sk_state_change(sk);
  304. break;
  305. case PNS_PEP_ENABLE_REQ:
  306. /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
  307. pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
  308. break;
  309. case PNS_PEP_RESET_REQ:
  310. switch (hdr->state_after_reset) {
  311. case PN_PIPE_DISABLE:
  312. pn->init_enable = 0;
  313. break;
  314. case PN_PIPE_ENABLE:
  315. pn->init_enable = 1;
  316. break;
  317. default: /* not allowed to send an error here!? */
  318. err = -EINVAL;
  319. goto out;
  320. }
  321. fallthrough;
  322. case PNS_PEP_DISABLE_REQ:
  323. atomic_set(&pn->tx_credits, 0);
  324. pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
  325. break;
  326. case PNS_PEP_CTRL_REQ:
  327. if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
  328. atomic_inc(&sk->sk_drops);
  329. break;
  330. }
  331. __skb_pull(skb, 4);
  332. queue = &pn->ctrlreq_queue;
  333. goto queue;
  334. case PNS_PIPE_ALIGNED_DATA:
  335. __skb_pull(skb, 1);
  336. fallthrough;
  337. case PNS_PIPE_DATA:
  338. __skb_pull(skb, 3); /* Pipe data header */
  339. if (!pn_flow_safe(pn->rx_fc)) {
  340. err = sock_queue_rcv_skb(sk, skb);
  341. if (!err)
  342. return NET_RX_SUCCESS;
  343. err = -ENOBUFS;
  344. break;
  345. }
  346. if (pn->rx_credits == 0) {
  347. atomic_inc(&sk->sk_drops);
  348. err = -ENOBUFS;
  349. break;
  350. }
  351. pn->rx_credits--;
  352. queue = &sk->sk_receive_queue;
  353. goto queue;
  354. case PNS_PEP_STATUS_IND:
  355. pipe_rcv_status(sk, skb);
  356. break;
  357. case PNS_PIPE_REDIRECTED_IND:
  358. err = pipe_rcv_created(sk, skb);
  359. break;
  360. case PNS_PIPE_CREATED_IND:
  361. err = pipe_rcv_created(sk, skb);
  362. if (err)
  363. break;
  364. fallthrough;
  365. case PNS_PIPE_RESET_IND:
  366. if (!pn->init_enable)
  367. break;
  368. fallthrough;
  369. case PNS_PIPE_ENABLED_IND:
  370. if (!pn_flow_safe(pn->tx_fc)) {
  371. atomic_set(&pn->tx_credits, 1);
  372. sk->sk_write_space(sk);
  373. }
  374. if (sk->sk_state == TCP_ESTABLISHED)
  375. break; /* Nothing to do */
  376. sk->sk_state = TCP_ESTABLISHED;
  377. pipe_grant_credits(sk, GFP_ATOMIC);
  378. break;
  379. case PNS_PIPE_DISABLED_IND:
  380. sk->sk_state = TCP_SYN_RECV;
  381. pn->rx_credits = 0;
  382. break;
  383. default:
  384. net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
  385. hdr->message_id);
  386. err = -EINVAL;
  387. }
  388. out:
  389. kfree_skb(skb);
  390. return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
  391. queue:
  392. skb->dev = NULL;
  393. skb_set_owner_r(skb, sk);
  394. skb_queue_tail(queue, skb);
  395. if (!sock_flag(sk, SOCK_DEAD))
  396. sk->sk_data_ready(sk);
  397. return NET_RX_SUCCESS;
  398. }
  399. /* Destroy connected sock. */
  400. static void pipe_destruct(struct sock *sk)
  401. {
  402. struct pep_sock *pn = pep_sk(sk);
  403. skb_queue_purge(&sk->sk_receive_queue);
  404. skb_queue_purge(&pn->ctrlreq_queue);
  405. }
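/*
 * Select a flow control mode from the list advertised by the peer.
 * Assuming the PN_*_FLOW_CONTROL constants are ordered from least to most
 * capable, this picks the most capable valid mode (the highest value below
 * PN_MAX_FLOW_CONTROL), falling back to PN_NO_FLOW_CONTROL.
 */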
  406. static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
  407. {
  408. unsigned int i;
  409. u8 final_fc = PN_NO_FLOW_CONTROL;
  410. for (i = 0; i < n; i++) {
  411. u8 fc = fcs[i];
  412. if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
  413. final_fc = fc;
  414. }
  415. return final_fc;
  416. }
  417. static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
  418. {
  419. struct pep_sock *pn = pep_sk(sk);
  420. struct pnpipehdr *hdr;
  421. u8 n_sb;
  422. if (!pskb_pull(skb, sizeof(*hdr) + 4))
  423. return -EINVAL;
  424. hdr = pnp_hdr(skb);
  425. if (hdr->error_code != PN_PIPE_NO_ERROR)
  426. return -ECONNREFUSED;
  427. /* Parse sub-blocks */
  428. n_sb = hdr->data[3];
  429. while (n_sb > 0) {
  430. u8 type, buf[6], len = sizeof(buf);
  431. const u8 *data = pep_get_sb(skb, &type, &len, buf);
  432. if (data == NULL)
  433. return -EINVAL;
  434. switch (type) {
  435. case PN_PIPE_SB_REQUIRED_FC_TX:
  436. if (len < 2 || len < data[0])
  437. break;
  438. pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
  439. break;
  440. case PN_PIPE_SB_PREFERRED_FC_RX:
  441. if (len < 2 || len < data[0])
  442. break;
  443. pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
  444. break;
  445. }
  446. n_sb--;
  447. }
  448. return pipe_handler_send_created_ind(sk);
  449. }
  450. static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
  451. {
  452. struct pnpipehdr *hdr = pnp_hdr(skb);
  453. if (hdr->error_code != PN_PIPE_NO_ERROR)
  454. return -ECONNREFUSED;
  455. return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
  456. NULL, 0, GFP_ATOMIC);
  457. }
  458. static void pipe_start_flow_control(struct sock *sk)
  459. {
  460. struct pep_sock *pn = pep_sk(sk);
  461. if (!pn_flow_safe(pn->tx_fc)) {
  462. atomic_set(&pn->tx_credits, 1);
  463. sk->sk_write_space(sk);
  464. }
  465. pipe_grant_credits(sk, GFP_ATOMIC);
  466. }
  467. /* Queue an skb to an actively connected sock.
  468. * Socket lock must be held. */
  469. static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
  470. {
  471. struct pep_sock *pn = pep_sk(sk);
  472. struct pnpipehdr *hdr = pnp_hdr(skb);
  473. int err = NET_RX_SUCCESS;
  474. switch (hdr->message_id) {
  475. case PNS_PIPE_ALIGNED_DATA:
  476. __skb_pull(skb, 1);
  477. fallthrough;
  478. case PNS_PIPE_DATA:
  479. __skb_pull(skb, 3); /* Pipe data header */
  480. if (!pn_flow_safe(pn->rx_fc)) {
  481. err = sock_queue_rcv_skb(sk, skb);
  482. if (!err)
  483. return NET_RX_SUCCESS;
  484. err = NET_RX_DROP;
  485. break;
  486. }
  487. if (pn->rx_credits == 0) {
  488. atomic_inc(&sk->sk_drops);
  489. err = NET_RX_DROP;
  490. break;
  491. }
  492. pn->rx_credits--;
  493. skb->dev = NULL;
  494. skb_set_owner_r(skb, sk);
  495. skb_queue_tail(&sk->sk_receive_queue, skb);
  496. if (!sock_flag(sk, SOCK_DEAD))
  497. sk->sk_data_ready(sk);
  498. return NET_RX_SUCCESS;
  499. case PNS_PEP_CONNECT_RESP:
  500. if (sk->sk_state != TCP_SYN_SENT)
  501. break;
  502. if (!sock_flag(sk, SOCK_DEAD))
  503. sk->sk_state_change(sk);
  504. if (pep_connresp_rcv(sk, skb)) {
  505. sk->sk_state = TCP_CLOSE_WAIT;
  506. break;
  507. }
  508. if (pn->init_enable == PN_PIPE_DISABLE)
  509. sk->sk_state = TCP_SYN_RECV;
  510. else {
  511. sk->sk_state = TCP_ESTABLISHED;
  512. pipe_start_flow_control(sk);
  513. }
  514. break;
  515. case PNS_PEP_ENABLE_RESP:
  516. if (sk->sk_state != TCP_SYN_SENT)
  517. break;
  518. if (pep_enableresp_rcv(sk, skb)) {
  519. sk->sk_state = TCP_CLOSE_WAIT;
  520. break;
  521. }
  522. sk->sk_state = TCP_ESTABLISHED;
  523. pipe_start_flow_control(sk);
  524. break;
  525. case PNS_PEP_DISCONNECT_RESP:
  526. /* sock should already be dead, nothing to do */
  527. break;
  528. case PNS_PEP_STATUS_IND:
  529. pipe_rcv_status(sk, skb);
  530. break;
  531. }
  532. kfree_skb(skb);
  533. return err;
  534. }
  535. /* Listening sock must be locked */
  536. static struct sock *pep_find_pipe(const struct hlist_head *hlist,
  537. const struct sockaddr_pn *dst,
  538. u8 pipe_handle)
  539. {
  540. struct sock *sknode;
  541. u16 dobj = pn_sockaddr_get_object(dst);
  542. sk_for_each(sknode, hlist) {
  543. struct pep_sock *pnnode = pep_sk(sknode);
  544. /* Ports match, but addresses might not: */
  545. if (pnnode->pn_sk.sobject != dobj)
  546. continue;
  547. if (pnnode->pipe_handle != pipe_handle)
  548. continue;
  549. if (sknode->sk_state == TCP_CLOSE_WAIT)
  550. continue;
  551. sock_hold(sknode);
  552. return sknode;
  553. }
  554. return NULL;
  555. }
  556. /*
  557. * Deliver an skb to a listening sock.
  558. * Socket lock must be held.
  559. * We then queue the skb to the right connected sock (if any).
  560. */
  561. static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
  562. {
  563. struct pep_sock *pn = pep_sk(sk);
  564. struct sock *sknode;
  565. struct pnpipehdr *hdr;
  566. struct sockaddr_pn dst;
  567. u8 pipe_handle;
  568. if (!pskb_may_pull(skb, sizeof(*hdr)))
  569. goto drop;
  570. hdr = pnp_hdr(skb);
  571. pipe_handle = hdr->pipe_handle;
  572. if (pipe_handle == PN_PIPE_INVALID_HANDLE)
  573. goto drop;
  574. pn_skb_get_dst_sockaddr(skb, &dst);
  575. /* Look for an existing pipe handle */
  576. sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
  577. if (sknode)
  578. return sk_receive_skb(sknode, skb, 1);
  579. switch (hdr->message_id) {
  580. case PNS_PEP_CONNECT_REQ:
  581. if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
  582. pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
  583. GFP_ATOMIC);
  584. break;
  585. }
  586. skb_queue_head(&sk->sk_receive_queue, skb);
  587. sk_acceptq_added(sk);
  588. if (!sock_flag(sk, SOCK_DEAD))
  589. sk->sk_data_ready(sk);
  590. return NET_RX_SUCCESS;
  591. case PNS_PEP_DISCONNECT_REQ:
  592. pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
  593. break;
  594. case PNS_PEP_CTRL_REQ:
  595. pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
  596. break;
  597. case PNS_PEP_RESET_REQ:
  598. case PNS_PEP_ENABLE_REQ:
  599. case PNS_PEP_DISABLE_REQ:
  600. /* invalid handle is not even allowed here! */
  601. break;
  602. default:
  603. if ((1 << sk->sk_state)
  604. & ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
  605. /* actively connected socket */
  606. return pipe_handler_do_rcv(sk, skb);
  607. }
  608. drop:
  609. kfree_skb(skb);
  610. return NET_RX_SUCCESS;
  611. }
  612. static int pipe_do_remove(struct sock *sk)
  613. {
  614. struct pep_sock *pn = pep_sk(sk);
  615. struct pnpipehdr *ph;
  616. struct sk_buff *skb;
  617. skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
  618. if (!skb)
  619. return -ENOMEM;
  620. ph = pnp_hdr(skb);
  621. ph->utid = 0;
  622. ph->message_id = PNS_PIPE_REMOVE_REQ;
  623. ph->pipe_handle = pn->pipe_handle;
  624. ph->data0 = PAD;
  625. return pn_skb_send(sk, skb, NULL);
  626. }
  627. /* associated socket ceases to exist */
  628. static void pep_sock_close(struct sock *sk, long timeout)
  629. {
  630. struct pep_sock *pn = pep_sk(sk);
  631. int ifindex = 0;
  632. sock_hold(sk); /* keep a reference after sk_common_release() */
  633. sk_common_release(sk);
  634. lock_sock(sk);
  635. if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
  636. if (sk->sk_backlog_rcv == pipe_do_rcv)
  637. /* Forcefully remove dangling Phonet pipe */
  638. pipe_do_remove(sk);
  639. else
  640. pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
  641. NULL, 0);
  642. }
  643. sk->sk_state = TCP_CLOSE;
  644. ifindex = pn->ifindex;
  645. pn->ifindex = 0;
  646. release_sock(sk);
  647. if (ifindex)
  648. gprs_detach(sk);
  649. sock_put(sk);
  650. }
  651. static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
  652. bool kern)
  653. {
  654. struct pep_sock *pn = pep_sk(sk), *newpn;
  655. struct sock *newsk = NULL;
  656. struct sk_buff *skb;
  657. struct pnpipehdr *hdr;
  658. struct sockaddr_pn dst, src;
  659. int err;
  660. u16 peer_type;
  661. u8 pipe_handle, enabled, n_sb;
  662. u8 aligned = 0;
  663. skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
  664. errp);
  665. if (!skb)
  666. return NULL;
  667. lock_sock(sk);
  668. if (sk->sk_state != TCP_LISTEN) {
  669. err = -EINVAL;
  670. goto drop;
  671. }
  672. sk_acceptq_removed(sk);
  673. err = -EPROTO;
  674. if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
  675. goto drop;
  676. hdr = pnp_hdr(skb);
  677. pipe_handle = hdr->pipe_handle;
  678. switch (hdr->state_after_connect) {
  679. case PN_PIPE_DISABLE:
  680. enabled = 0;
  681. break;
  682. case PN_PIPE_ENABLE:
  683. enabled = 1;
  684. break;
  685. default:
  686. pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
  687. GFP_KERNEL);
  688. goto drop;
  689. }
  690. peer_type = hdr->other_pep_type << 8;
  691. /* Parse sub-blocks (options) */
  692. n_sb = hdr->data[3];
  693. while (n_sb > 0) {
  694. u8 type, buf[1], len = sizeof(buf);
  695. const u8 *data = pep_get_sb(skb, &type, &len, buf);
  696. if (data == NULL)
  697. goto drop;
  698. switch (type) {
  699. case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
  700. if (len < 1)
  701. goto drop;
  702. peer_type = (peer_type & 0xff00) | data[0];
  703. break;
  704. case PN_PIPE_SB_ALIGNED_DATA:
  705. aligned = data[0] != 0;
  706. break;
  707. }
  708. n_sb--;
  709. }
  710. /* Check for duplicate pipe handle */
  711. newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
  712. if (unlikely(newsk)) {
  713. __sock_put(newsk);
  714. newsk = NULL;
  715. pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
  716. goto drop;
  717. }
  718. /* Create a new to-be-accepted sock */
  719. newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
  720. kern);
  721. if (!newsk) {
  722. pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
  723. err = -ENOBUFS;
  724. goto drop;
  725. }
  726. sock_init_data(NULL, newsk);
  727. newsk->sk_state = TCP_SYN_RECV;
  728. newsk->sk_backlog_rcv = pipe_do_rcv;
  729. newsk->sk_protocol = sk->sk_protocol;
  730. newsk->sk_destruct = pipe_destruct;
  731. newpn = pep_sk(newsk);
  732. pn_skb_get_dst_sockaddr(skb, &dst);
  733. pn_skb_get_src_sockaddr(skb, &src);
  734. newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
  735. newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
  736. newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
  737. sock_hold(sk);
  738. newpn->listener = sk;
  739. skb_queue_head_init(&newpn->ctrlreq_queue);
  740. newpn->pipe_handle = pipe_handle;
  741. atomic_set(&newpn->tx_credits, 0);
  742. newpn->ifindex = 0;
  743. newpn->peer_type = peer_type;
  744. newpn->rx_credits = 0;
  745. newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
  746. newpn->init_enable = enabled;
  747. newpn->aligned = aligned;
  748. err = pep_accept_conn(newsk, skb);
  749. if (err) {
  750. __sock_put(sk);
  751. sock_put(newsk);
  752. newsk = NULL;
  753. goto drop;
  754. }
  755. sk_add_node(newsk, &pn->hlist);
  756. drop:
  757. release_sock(sk);
  758. kfree_skb(skb);
  759. *errp = err;
  760. return newsk;
  761. }
  762. static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
  763. {
  764. struct pep_sock *pn = pep_sk(sk);
  765. int err;
  766. u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
  767. if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
  768. pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
  769. err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
  770. pn->init_enable, data, 4);
  771. if (err) {
  772. pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
  773. return err;
  774. }
  775. sk->sk_state = TCP_SYN_SENT;
  776. return 0;
  777. }
  778. static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
  779. {
  780. int err;
  781. err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
  782. NULL, 0);
  783. if (err)
  784. return err;
  785. sk->sk_state = TCP_SYN_SENT;
  786. return 0;
  787. }
  788. static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
  789. {
  790. struct pep_sock *pn = pep_sk(sk);
  791. int answ;
  792. int ret = -ENOIOCTLCMD;
  793. switch (cmd) {
  794. case SIOCINQ:
  795. if (sk->sk_state == TCP_LISTEN) {
  796. ret = -EINVAL;
  797. break;
  798. }
  799. lock_sock(sk);
  800. if (sock_flag(sk, SOCK_URGINLINE) &&
  801. !skb_queue_empty(&pn->ctrlreq_queue))
  802. answ = skb_peek(&pn->ctrlreq_queue)->len;
  803. else if (!skb_queue_empty(&sk->sk_receive_queue))
  804. answ = skb_peek(&sk->sk_receive_queue)->len;
  805. else
  806. answ = 0;
  807. release_sock(sk);
  808. ret = put_user(answ, (int __user *)arg);
  809. break;
  810. case SIOCPNENABLEPIPE:
  811. lock_sock(sk);
  812. if (sk->sk_state == TCP_SYN_SENT)
  813. ret = -EBUSY;
  814. else if (sk->sk_state == TCP_ESTABLISHED)
  815. ret = -EISCONN;
  816. else if (!pn->pn_sk.sobject)
  817. ret = -EADDRNOTAVAIL;
  818. else
  819. ret = pep_sock_enable(sk, NULL, 0);
  820. release_sock(sk);
  821. break;
  822. }
  823. return ret;
  824. }
  825. static int pep_init(struct sock *sk)
  826. {
  827. struct pep_sock *pn = pep_sk(sk);
  828. sk->sk_destruct = pipe_destruct;
  829. INIT_HLIST_HEAD(&pn->hlist);
  830. pn->listener = NULL;
  831. skb_queue_head_init(&pn->ctrlreq_queue);
  832. atomic_set(&pn->tx_credits, 0);
  833. pn->ifindex = 0;
  834. pn->peer_type = 0;
  835. pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
  836. pn->rx_credits = 0;
  837. pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
  838. pn->init_enable = 1;
  839. pn->aligned = 0;
  840. return 0;
  841. }
  842. static int pep_setsockopt(struct sock *sk, int level, int optname,
  843. sockptr_t optval, unsigned int optlen)
  844. {
  845. struct pep_sock *pn = pep_sk(sk);
  846. int val = 0, err = 0;
  847. if (level != SOL_PNPIPE)
  848. return -ENOPROTOOPT;
  849. if (optlen >= sizeof(int)) {
  850. if (copy_from_sockptr(&val, optval, sizeof(int)))
  851. return -EFAULT;
  852. }
  853. lock_sock(sk);
  854. switch (optname) {
  855. case PNPIPE_ENCAP:
  856. if (val && val != PNPIPE_ENCAP_IP) {
  857. err = -EINVAL;
  858. break;
  859. }
  860. if (!pn->ifindex == !val)
  861. break; /* Nothing to do! */
  862. if (!capable(CAP_NET_ADMIN)) {
  863. err = -EPERM;
  864. break;
  865. }
  866. if (val) {
  867. release_sock(sk);
  868. err = gprs_attach(sk);
  869. if (err > 0) {
  870. pn->ifindex = err;
  871. err = 0;
  872. }
  873. } else {
  874. pn->ifindex = 0;
  875. release_sock(sk);
  876. gprs_detach(sk);
  877. err = 0;
  878. }
  879. goto out_norel;
  880. case PNPIPE_HANDLE:
  881. if ((sk->sk_state == TCP_CLOSE) &&
  882. (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
  883. pn->pipe_handle = val;
  884. else
  885. err = -EINVAL;
  886. break;
  887. case PNPIPE_INITSTATE:
  888. pn->init_enable = !!val;
  889. break;
  890. default:
  891. err = -ENOPROTOOPT;
  892. }
  893. release_sock(sk);
  894. out_norel:
  895. return err;
  896. }
  897. static int pep_getsockopt(struct sock *sk, int level, int optname,
  898. char __user *optval, int __user *optlen)
  899. {
  900. struct pep_sock *pn = pep_sk(sk);
  901. int len, val;
  902. if (level != SOL_PNPIPE)
  903. return -ENOPROTOOPT;
  904. if (get_user(len, optlen))
  905. return -EFAULT;
  906. switch (optname) {
  907. case PNPIPE_ENCAP:
  908. val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
  909. break;
  910. case PNPIPE_IFINDEX:
  911. val = pn->ifindex;
  912. break;
  913. case PNPIPE_HANDLE:
  914. val = pn->pipe_handle;
  915. if (val == PN_PIPE_INVALID_HANDLE)
  916. return -EINVAL;
  917. break;
  918. case PNPIPE_INITSTATE:
  919. val = pn->init_enable;
  920. break;
  921. default:
  922. return -ENOPROTOOPT;
  923. }
  924. len = min_t(unsigned int, sizeof(int), len);
  925. if (put_user(len, optlen))
  926. return -EFAULT;
  927. if (put_user(val, (int __user *) optval))
  928. return -EFAULT;
  929. return 0;
  930. }
  931. static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
  932. {
  933. struct pep_sock *pn = pep_sk(sk);
  934. struct pnpipehdr *ph;
  935. int err;
  936. if (pn_flow_safe(pn->tx_fc) &&
  937. !atomic_add_unless(&pn->tx_credits, -1, 0)) {
  938. kfree_skb(skb);
  939. return -ENOBUFS;
  940. }
  941. skb_push(skb, 3 + pn->aligned);
  942. skb_reset_transport_header(skb);
  943. ph = pnp_hdr(skb);
  944. ph->utid = 0;
  945. if (pn->aligned) {
  946. ph->message_id = PNS_PIPE_ALIGNED_DATA;
  947. ph->data0 = 0; /* padding */
  948. } else
  949. ph->message_id = PNS_PIPE_DATA;
  950. ph->pipe_handle = pn->pipe_handle;
  951. err = pn_skb_send(sk, skb, NULL);
  952. if (err && pn_flow_safe(pn->tx_fc))
  953. atomic_inc(&pn->tx_credits);
  954. return err;
  955. }
  956. static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
  957. {
  958. struct pep_sock *pn = pep_sk(sk);
  959. struct sk_buff *skb;
  960. long timeo;
  961. int flags = msg->msg_flags;
  962. int err, done;
  963. if (len > USHRT_MAX)
  964. return -EMSGSIZE;
  965. if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
  966. MSG_CMSG_COMPAT)) ||
  967. !(msg->msg_flags & MSG_EOR))
  968. return -EOPNOTSUPP;
  969. skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
  970. flags & MSG_DONTWAIT, &err);
  971. if (!skb)
  972. return err;
  973. skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
  974. err = memcpy_from_msg(skb_put(skb, len), msg, len);
  975. if (err < 0)
  976. goto outfree;
  977. lock_sock(sk);
  978. timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
  979. if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
  980. err = -ENOTCONN;
  981. goto out;
  982. }
  983. if (sk->sk_state != TCP_ESTABLISHED) {
  984. /* Wait until the pipe gets to enabled state */
  985. disabled:
  986. err = sk_stream_wait_connect(sk, &timeo);
  987. if (err)
  988. goto out;
  989. if (sk->sk_state == TCP_CLOSE_WAIT) {
  990. err = -ECONNRESET;
  991. goto out;
  992. }
  993. }
  994. BUG_ON(sk->sk_state != TCP_ESTABLISHED);
  995. /* Wait until flow control allows TX */
  996. done = atomic_read(&pn->tx_credits);
  997. while (!done) {
  998. DEFINE_WAIT_FUNC(wait, woken_wake_function);
  999. if (!timeo) {
  1000. err = -EAGAIN;
  1001. goto out;
  1002. }
  1003. if (signal_pending(current)) {
  1004. err = sock_intr_errno(timeo);
  1005. goto out;
  1006. }
  1007. add_wait_queue(sk_sleep(sk), &wait);
  1008. done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait);
  1009. remove_wait_queue(sk_sleep(sk), &wait);
  1010. if (sk->sk_state != TCP_ESTABLISHED)
  1011. goto disabled;
  1012. }
  1013. err = pipe_skb_send(sk, skb);
  1014. if (err >= 0)
  1015. err = len; /* success! */
  1016. skb = NULL;
  1017. out:
  1018. release_sock(sk);
  1019. outfree:
  1020. kfree_skb(skb);
  1021. return err;
  1022. }
  1023. int pep_writeable(struct sock *sk)
  1024. {
  1025. struct pep_sock *pn = pep_sk(sk);
  1026. return atomic_read(&pn->tx_credits);
  1027. }
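/*
 * pep_write() transmits a frame on behalf of the GPRS encapsulation set up
 * via PNPIPE_ENCAP/gprs_attach().  In aligned mode the frame is handed to
 * pipe_skb_send() as-is.  Otherwise a small header-only skb is allocated,
 * the original frame is attached to it through the fragment list (any
 * fragment list of the original frame is unnested first), and the pair is
 * sent as a single pipe message.
 */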
  1028. int pep_write(struct sock *sk, struct sk_buff *skb)
  1029. {
  1030. struct sk_buff *rskb, *fs;
  1031. int flen = 0;
  1032. if (pep_sk(sk)->aligned)
  1033. return pipe_skb_send(sk, skb);
  1034. rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
  1035. if (!rskb) {
  1036. kfree_skb(skb);
  1037. return -ENOMEM;
  1038. }
  1039. skb_shinfo(rskb)->frag_list = skb;
  1040. rskb->len += skb->len;
  1041. rskb->data_len += rskb->len;
  1042. rskb->truesize += rskb->len;
  1043. /* Avoid nested fragments */
  1044. skb_walk_frags(skb, fs)
  1045. flen += fs->len;
  1046. skb->next = skb_shinfo(skb)->frag_list;
  1047. skb_frag_list_init(skb);
  1048. skb->len -= flen;
  1049. skb->data_len -= flen;
  1050. skb->truesize -= flen;
  1051. skb_reserve(rskb, MAX_PHONET_HEADER + 3);
  1052. return pipe_skb_send(sk, rskb);
  1053. }
  1054. struct sk_buff *pep_read(struct sock *sk)
  1055. {
  1056. struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
  1057. if (sk->sk_state == TCP_ESTABLISHED)
  1058. pipe_grant_credits(sk, GFP_ATOMIC);
  1059. return skb;
  1060. }
  1061. static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
  1062. int flags, int *addr_len)
  1063. {
  1064. struct sk_buff *skb;
  1065. int err;
  1066. if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
  1067. MSG_NOSIGNAL|MSG_CMSG_COMPAT))
  1068. return -EOPNOTSUPP;
  1069. if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
  1070. return -ENOTCONN;
  1071. if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
  1072. /* Dequeue and acknowledge control request */
  1073. struct pep_sock *pn = pep_sk(sk);
  1074. if (flags & MSG_PEEK)
  1075. return -EOPNOTSUPP;
  1076. skb = skb_dequeue(&pn->ctrlreq_queue);
  1077. if (skb) {
  1078. pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
  1079. GFP_KERNEL);
  1080. msg->msg_flags |= MSG_OOB;
  1081. goto copy;
  1082. }
  1083. if (flags & MSG_OOB)
  1084. return -EINVAL;
  1085. }
  1086. skb = skb_recv_datagram(sk, flags, &err);
  1087. lock_sock(sk);
  1088. if (skb == NULL) {
  1089. if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
  1090. err = -ECONNRESET;
  1091. release_sock(sk);
  1092. return err;
  1093. }
  1094. if (sk->sk_state == TCP_ESTABLISHED)
  1095. pipe_grant_credits(sk, GFP_KERNEL);
  1096. release_sock(sk);
  1097. copy:
  1098. msg->msg_flags |= MSG_EOR;
  1099. if (skb->len > len)
  1100. msg->msg_flags |= MSG_TRUNC;
  1101. else
  1102. len = skb->len;
  1103. err = skb_copy_datagram_msg(skb, 0, msg, len);
  1104. if (!err)
  1105. err = (flags & MSG_TRUNC) ? skb->len : len;
  1106. skb_free_datagram(sk, skb);
  1107. return err;
  1108. }
  1109. static void pep_sock_unhash(struct sock *sk)
  1110. {
  1111. struct pep_sock *pn = pep_sk(sk);
  1112. struct sock *skparent = NULL;
  1113. lock_sock(sk);
  1114. if (pn->listener != NULL) {
  1115. skparent = pn->listener;
  1116. pn->listener = NULL;
  1117. release_sock(sk);
  1118. pn = pep_sk(skparent);
  1119. lock_sock(skparent);
  1120. sk_del_node_init(sk);
  1121. sk = skparent;
  1122. }
  1123. /* Unhash a listening sock only when it is closed
  1124. * and all of its active connected pipes are closed. */
  1125. if (hlist_empty(&pn->hlist))
  1126. pn_sock_unhash(&pn->pn_sk.sk);
  1127. release_sock(sk);
  1128. if (skparent)
  1129. sock_put(skparent);
  1130. }
  1131. static struct proto pep_proto = {
  1132. .close = pep_sock_close,
  1133. .accept = pep_sock_accept,
  1134. .connect = pep_sock_connect,
  1135. .ioctl = pep_ioctl,
  1136. .init = pep_init,
  1137. .setsockopt = pep_setsockopt,
  1138. .getsockopt = pep_getsockopt,
  1139. .sendmsg = pep_sendmsg,
  1140. .recvmsg = pep_recvmsg,
  1141. .backlog_rcv = pep_do_rcv,
  1142. .hash = pn_sock_hash,
  1143. .unhash = pep_sock_unhash,
  1144. .get_port = pn_sock_get_port,
  1145. .obj_size = sizeof(struct pep_sock),
  1146. .owner = THIS_MODULE,
  1147. .name = "PNPIPE",
  1148. };
  1149. static const struct phonet_protocol pep_pn_proto = {
  1150. .ops = &phonet_stream_ops,
  1151. .prot = &pep_proto,
  1152. .sock_type = SOCK_SEQPACKET,
  1153. };
  1154. static int __init pep_register(void)
  1155. {
  1156. return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
  1157. }
  1158. static void __exit pep_unregister(void)
  1159. {
  1160. phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
  1161. }
  1162. module_init(pep_register);
  1163. module_exit(pep_unregister);
  1164. MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
  1165. MODULE_DESCRIPTION("Phonet pipe protocol");
  1166. MODULE_LICENSE("GPL");
  1167. MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);