// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
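
/* Buffer for assembling an ACK packet: the wire header, the ACK body, up to
 * 255 soft-ACK/NACK bytes, three bytes of padding and the trailing ACK info
 * block.
 */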
struct rxrpc_ack_buffer {
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	u8 acks[255];
	u8 pad[3];
	struct rxrpc_ackinfo ackinfo;
};
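
/* Buffer for assembling an ABORT packet: the wire header followed by the
 * abort code.
 */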
struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
	if (ret < 0) {
		u16 tx_backoff = READ_ONCE(call->tx_backoff);

		if (tx_backoff < HZ)
			WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
	} else {
		WRITE_ONCE(call->tx_backoff, 0);
	}
}

/*
 * Arrange for a keepalive ping a certain time after we last transmitted. This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;

	keepalive_at += now;
	WRITE_ONCE(call->keepalive_at, keepalive_at);
	rxrpc_reduce_call_timer(call, keepalive_at, now,
				rxrpc_timer_set_for_keepalive);
}

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
				 struct rxrpc_call *call,
				 struct rxrpc_ack_buffer *pkt,
				 rxrpc_seq_t *_hard_ack,
				 rxrpc_seq_t *_top,
				 u8 reason)
{
	rxrpc_serial_t serial;
	unsigned int tmp;
	rxrpc_seq_t hard_ack, top, seq;
	int ix;
	u32 mtu, jmax;
	u8 *ackp = pkt->acks;

	tmp = atomic_xchg(&call->ackr_nr_unacked, 0);
	tmp |= atomic_xchg(&call->ackr_nr_consumed, 0);
	if (!tmp && (reason == RXRPC_ACK_DELAY ||
		     reason == RXRPC_ACK_IDLE))
		return 0;

	/* Barrier against rxrpc_input_data(). */
	serial = call->ackr_serial;
	hard_ack = READ_ONCE(call->rx_hard_ack);
	top = smp_load_acquire(&call->rx_top);
	*_hard_ack = hard_ack;
	*_top = top;

	pkt->ack.bufferSpace = htons(0);
	pkt->ack.maxSkew = htons(0);
	pkt->ack.firstPacket = htonl(hard_ack + 1);
	pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
	pkt->ack.serial = htonl(serial);
	pkt->ack.reason = reason;
	pkt->ack.nAcks = top - hard_ack;

	if (reason == RXRPC_ACK_PING)
		pkt->whdr.flags |= RXRPC_REQUEST_ACK;

	if (after(top, hard_ack)) {
		seq = hard_ack + 1;
		do {
			ix = seq & RXRPC_RXTX_BUFF_MASK;
			if (call->rxtx_buffer[ix])
				*ackp++ = RXRPC_ACK_TYPE_ACK;
			else
				*ackp++ = RXRPC_ACK_TYPE_NACK;
			seq++;
		} while (before_eq(seq, top));
	}

	mtu = conn->params.peer->if_mtu;
	mtu -= conn->params.peer->hdrsize;
	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
	pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	pkt->ackinfo.maxMTU = htonl(mtu);
	pkt->ackinfo.rwind = htonl(call->rx_winsize);
	pkt->ackinfo.jumbo_max = htonl(jmax);
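
	/* Three zero pad bytes follow the soft-ACK list; they are included in
	 * the length returned to the caller.
	 */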
	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	return top - hard_ack + 3;
}

/*
 * Record the beginning of an RTT probe.
 */
static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
				 enum rxrpc_rtt_tx_trace why)
{
	unsigned long avail = call->rtt_avail;
	int rtt_slot = 9;

	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
		goto no_slot;

	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
		goto no_slot;

	call->rtt_serial[rtt_slot] = serial;
	call->rtt_sent_at[rtt_slot] = ktime_get_real();
	smp_wmb(); /* Write data before avail bit */
	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);

	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
	return rtt_slot;

no_slot:
	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
	return -1;
}

/*
 * Cancel an RTT probe.
 */
static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
				   rxrpc_serial_t serial, int rtt_slot)
{
	if (rtt_slot != -1) {
		clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
		smp_wmb(); /* Clear pending bit before setting slot */
		set_bit(rtt_slot, &call->rtt_avail);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
	}
}

/*
 * Send an ACK call packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
			  rxrpc_serial_t *_serial)
{
	struct rxrpc_connection *conn;
	struct rxrpc_ack_buffer *pkt;
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	size_t len, n;
	int ret, rtt_slot = -1;
	u8 reason;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	conn = call->conn;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	pkt->whdr.epoch = htonl(conn->proto.epoch);
	pkt->whdr.cid = htonl(call->cid);
	pkt->whdr.callNumber = htonl(call->call_id);
	pkt->whdr.seq = 0;
	pkt->whdr.type = RXRPC_PACKET_TYPE_ACK;
	pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag;
	pkt->whdr.userStatus = 0;
	pkt->whdr.securityIndex = call->security_ix;
	pkt->whdr._rsvd = 0;
	pkt->whdr.serviceId = htons(call->service_id);

	spin_lock_bh(&call->lock);
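
	/* Determine the ACK reason under the call lock: a ping is sent as
	 * such, otherwise the pending deferred reason is consumed (and the
	 * send is abandoned if there isn't one).
	 */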
	if (ping) {
		reason = RXRPC_ACK_PING;
	} else {
		reason = call->ackr_reason;
		if (!call->ackr_reason) {
			spin_unlock_bh(&call->lock);
			ret = 0;
			goto out;
		}
		call->ackr_reason = 0;
	}
	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);

	spin_unlock_bh(&call->lock);
	if (n == 0) {
		kfree(pkt);
		return 0;
	}

	iov[0].iov_base = pkt;
	iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
	iov[1].iov_base = &pkt->ackinfo;
	iov[1].iov_len = sizeof(pkt->ackinfo);
	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	pkt->whdr.serial = htonl(serial);
	trace_rxrpc_tx_ack(call->debug_id, serial,
			   ntohl(pkt->ack.firstPacket),
			   ntohl(pkt->ack.serial),
			   pkt->ack.reason, pkt->ack.nAcks);
	if (_serial)
		*_serial = serial;

	if (ping)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_ack);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
				      rxrpc_tx_point_call_ack);
	rxrpc_tx_backoff(call, ret);
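
	/* On failure, cancel any RTT probe and put the ACK back up for
	 * retransmission; either way, rearm the keepalive timer while the
	 * call is still live.
	 */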
	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0) {
			rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
			rxrpc_propose_ACK(call, pkt->ack.reason,
					  ntohl(pkt->ack.serial),
					  false, true,
					  rxrpc_propose_ack_retry_tx);
		}
		rxrpc_set_keepalive(call);
	}

out:
	kfree(pkt);
	return ret;
}

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data. After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(call->cid);
	pkt.whdr.callNumber = htonl(call->call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = call->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(call->service_id);
	pkt.abort_code = htonl(call->abort_code);

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	ret = kernel_sendmsg(conn->params.local->socket,
			     &msg, iov, 1, sizeof(pkt));
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}

/*
 * send a packet through the transport endpoint
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
			   bool retrans)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_wire_header whdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	size_t len;
	int ret, rtt_slot = -1;

	_enter(",{%d}", skb->len);

	if (hlist_unhashed(&call->error_link)) {
		spin_lock_bh(&call->peer->lock);
		hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
		spin_unlock_bh(&call->peer->lock);
	}

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);

	whdr.epoch = htonl(conn->proto.epoch);
	whdr.cid = htonl(call->cid);
	whdr.callNumber = htonl(call->call_id);
	whdr.seq = htonl(sp->hdr.seq);
	whdr.serial = htonl(serial);
	whdr.type = RXRPC_PACKET_TYPE_DATA;
	whdr.flags = sp->hdr.flags;
	whdr.userStatus = 0;
	whdr.securityIndex = call->security_ix;
	whdr._rsvd = htons(sp->hdr._rsvd);
	whdr.serviceId = htons(call->service_id);

	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    sp->hdr.seq == 1)
		whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = skb->head;
	iov[1].iov_len = skb->len;
	len = iov[0].iov_len + iov[1].iov_len;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* If our RTT cache needs working on, request an ACK. Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
	if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
	     rxrpc_to_server(sp)
	     ) &&
	    (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
	     retrans ||
	     call->cong_mode == RXRPC_CALL_SLOW_START ||
	     (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
			  ktime_get_real())))
		whdr.flags |= RXRPC_REQUEST_ACK;
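
	/* When loss injection is configured in, drop one packet in every
	 * eight and pretend it was transmitted successfully.
	 */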
	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;

		if ((lose++ & 7) == 7) {
			ret = 0;
			trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
					    whdr.flags, retrans, true);
			goto done;
		}
	}

	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
			    false);

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (iov[1].iov_len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->params.local->defrag_sem);

	sp->hdr.serial = serial;
	smp_wmb(); /* Set serial before timestamp */
	skb->tstamp = ktime_get_real();
	if (whdr.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	/* send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and update the peer record
	 */
	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();

	up_read(&conn->params.local->defrag_sem);
	if (ret < 0) {
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_nofrag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &whdr,
				      rxrpc_tx_point_call_data_nofrag);
	}
	rxrpc_tx_backoff(call, ret);
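
	/* If UDP refused to send without fragmenting, fall back to the
	 * fragmentable path below.
	 */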
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	if (ret >= 0) {
		if (whdr.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = skb->tstamp;
			if (call->peer->rtt_count > 1) {
				unsigned long nowj = jiffies, ack_lost_at;

				ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
				ack_lost_at += nowj;
				WRITE_ONCE(call->ack_lost_at, ack_lost_at);
				rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
							rxrpc_timer_set_for_lost_ack);
			}
		}

		if (sp->hdr.seq == 1 &&
		    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
				      &call->flags)) {
			unsigned long nowj = jiffies, expect_rx_by;

			expect_rx_by = nowj + call->next_rx_timo;
			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
						rxrpc_timer_set_for_normal);
		}

		rxrpc_set_keepalive(call);
	} else {
		/* Cancel the call if the initial transmission fails,
		 * particularly if that's due to network routing issues that
		 * aren't going away anytime soon. The layer above can arrange
		 * the retransmission.
		 */
		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  RX_USER_ABORT, ret);
	}

	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->params.local->defrag_sem);

	sp->hdr.serial = serial;
	smp_wmb(); /* Set serial before timestamp */
	skb->tstamp = ktime_get_real();
	if (whdr.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	switch (conn->params.local->srx.transport.family) {
	case AF_INET6:
	case AF_INET:
		ip_sock_set_mtu_discover(conn->params.local->socket->sk,
					 IP_PMTUDISC_DONT);
		ret = kernel_sendmsg(conn->params.local->socket, &msg,
				     iov, 2, len);
		conn->params.peer->last_tx_at = ktime_get_seconds();

		ip_sock_set_mtu_discover(conn->params.local->socket->sk,
					 IP_PMTUDISC_DO);
		break;

	default:
		BUG();
	}

	if (ret < 0) {
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_frag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &whdr,
				      rxrpc_tx_point_call_data_frag);
	}
	rxrpc_tx_backoff(call, ret);

	up_write(&conn->params.local->defrag_sem);
	goto done;
}

/*
 * reject packets through the local endpoint
 */
void rxrpc_reject_packets(struct rxrpc_local *local)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	_enter("%d", local->debug_id);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));
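
	/* Work through the reject queue, replying to each queued packet with
	 * a BUSY or an ABORT according to its mark and discarding anything
	 * else.
	 */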
	while ((skb = skb_dequeue(&local->reject_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		sp = rxrpc_skb(skb);

		switch (skb->mark) {
		case RXRPC_SKB_MARK_REJECT_BUSY:
			whdr.type = RXRPC_PACKET_TYPE_BUSY;
			size = sizeof(whdr);
			ioc = 1;
			break;
		case RXRPC_SKB_MARK_REJECT_ABORT:
			whdr.type = RXRPC_PACKET_TYPE_ABORT;
			code = htonl(skb->priority);
			size = sizeof(whdr) + sizeof(code);
			ioc = 2;
			break;
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			continue;
		}

		if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
			msg.msg_namelen = srx.transport_len;

			whdr.epoch = htonl(sp->hdr.epoch);
			whdr.cid = htonl(sp->hdr.cid);
			whdr.callNumber = htonl(sp->hdr.callNumber);
			whdr.serviceId = htons(sp->hdr.serviceId);
			whdr.flags = sp->hdr.flags;
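
			/* Reply in the opposite direction: flip the
			 * client-initiated bit and clear every other flag.
			 */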
			whdr.flags ^= RXRPC_CLIENT_INITIATED;
			whdr.flags &= RXRPC_CLIENT_INITIATED;

			ret = kernel_sendmsg(local->socket, &msg,
					     iov, ioc, size);
			if (ret < 0)
				trace_rxrpc_tx_fail(local->debug_id, 0, ret,
						    rxrpc_tx_point_reject);
			else
				trace_rxrpc_tx_packet(local->debug_id, &whdr,
						      rxrpc_tx_point_reject);
		}

		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}

	_leave("");
}

/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name = &peer->srx.transport;
	msg.msg_namelen = peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(peer->local->rxnet->epoch);
	whdr.cid = 0;
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.serial = 0;
	whdr.type = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags = RXRPC_LAST_PACKET;
	whdr.userStatus = 0;
	whdr.securityIndex = 0;
	whdr._rsvd = 0;
	whdr.serviceId = 0;

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = (char *)rxrpc_keepalive_string;
	iov[1].iov_len = sizeof(rxrpc_keepalive_string);

	len = iov[0].iov_len + iov[1].iov_len;

	_proto("Tx VERSION (keepalive)");

	ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}