  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* RxRPC virtual connection handler, common bits.
  3. *
  4. * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells ([email protected])
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/module.h>
  9. #include <linux/slab.h>
  10. #include <linux/net.h>
  11. #include <linux/skbuff.h>
  12. #include "ar-internal.h"
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
/* Shorter idle expiry (in seconds) used by the reaper when the local
 * endpoint's service has been closed (see params.local->service_closed).
 */
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

/* RCU callback that performs final destruction of a connection. */
static void rxrpc_destroy_connection(struct rcu_head *);
  19. static void rxrpc_connection_timer(struct timer_list *timer)
  20. {
  21. struct rxrpc_connection *conn =
  22. container_of(timer, struct rxrpc_connection, timer);
  23. rxrpc_queue_conn(conn);
  24. }
  25. /*
  26. * allocate a new connection
  27. */
  28. struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  29. {
  30. struct rxrpc_connection *conn;
  31. _enter("");
  32. conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
  33. if (conn) {
  34. INIT_LIST_HEAD(&conn->cache_link);
  35. timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
  36. INIT_WORK(&conn->processor, &rxrpc_process_connection);
  37. INIT_LIST_HEAD(&conn->proc_link);
  38. INIT_LIST_HEAD(&conn->link);
  39. skb_queue_head_init(&conn->rx_queue);
  40. conn->security = &rxrpc_no_security;
  41. spin_lock_init(&conn->state_lock);
  42. conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
  43. conn->idle_timestamp = jiffies;
  44. }
  45. _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
  46. return conn;
  47. }
/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * When searching for a service call, if we find a peer but no connection, we
 * return that through *_peer in case we need to create a new service call.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb,
						   struct rxrpc_peer **_peer)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto not_found;

	/* Reject an address-family mismatch, except that an AF_INET packet
	 * arriving on an AF_INET6 endpoint is let through (presumably for
	 * v4-mapped addresses — TODO confirm against the transport code).
	 */
	if (srx.transport.family != local->srx.transport.family &&
	    (srx.transport.family == AF_INET &&
	     local->srx.transport.family != AF_INET6)) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	if (rxrpc_to_server(sp)) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
		*_peer = peer;
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		/* A zero refcount means the connection is being destroyed
		 * concurrently; treat it as not found.
		 */
		if (!conn || refcount_read(&conn->ref) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || refcount_read(&conn->ref) == 0) {
			_debug("no conn");
			goto not_found;
		}

		/* The epoch and the local endpoint must also match. */
		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		/* Check that the packet's source address matches the peer
		 * the connection was established to.
		 */
		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	/* Only clear the channel if this call still occupies it. */
	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			/* Successful call: remember the final ACK state. */
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			/* Any other completion is reported as a dead call. */
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(): the saved last_* state
		 * above must be visible before last_call advertises it.
		 */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	/* Carry the learned congestion window over to the peer record. */
	call->peer->cong_cwnd = call->cong_cwnd;

	/* Remove the call from the peer's error-distribution list, if it
	 * was ever hashed in.
	 */
	if (!hlist_unhashed(&call->error_link)) {
		spin_lock_bh(&call->peer->lock);
		hlist_del_rcu(&call->error_link);
		spin_unlock_bh(&call->peer->lock);
	}

	/* Client calls are handed off to the client-connection machinery. */
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(conn->bundle, call);

	spin_lock(&conn->bundle->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->bundle->channel_lock);

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	/* Restart the idle clock so the reaper leaves the conn alone for a
	 * full expiry period.
	 */
	conn->idle_timestamp = jiffies;
}
/*
 * Kill off a connection.
 *
 * Unpublishes the connection from the proc list, drains its Rx queue and
 * defers final destruction to RCU.  Callers in this file only invoke this
 * once the refcount has been reduced to zero (see the service reaper).
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	/* All four channels must be clear of calls and the connection must
	 * not be on the client idle-cache list.
	 */
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	/* Unpublish from the proc listing. */
	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}
  221. /*
  222. * Queue a connection's work processor, getting a ref to pass to the work
  223. * queue.
  224. */
  225. bool rxrpc_queue_conn(struct rxrpc_connection *conn)
  226. {
  227. const void *here = __builtin_return_address(0);
  228. int r;
  229. if (!__refcount_inc_not_zero(&conn->ref, &r))
  230. return false;
  231. if (rxrpc_queue_work(&conn->processor))
  232. trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, r + 1, here);
  233. else
  234. rxrpc_put_connection(conn);
  235. return true;
  236. }
  237. /*
  238. * Note the re-emergence of a connection.
  239. */
  240. void rxrpc_see_connection(struct rxrpc_connection *conn)
  241. {
  242. const void *here = __builtin_return_address(0);
  243. if (conn) {
  244. int n = refcount_read(&conn->ref);
  245. trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
  246. }
  247. }
  248. /*
  249. * Get a ref on a connection.
  250. */
  251. struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn)
  252. {
  253. const void *here = __builtin_return_address(0);
  254. int r;
  255. __refcount_inc(&conn->ref, &r);
  256. trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r, here);
  257. return conn;
  258. }
  259. /*
  260. * Try to get a ref on a connection.
  261. */
  262. struct rxrpc_connection *
  263. rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
  264. {
  265. const void *here = __builtin_return_address(0);
  266. int r;
  267. if (conn) {
  268. if (__refcount_inc_not_zero(&conn->ref, &r))
  269. trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, r + 1, here);
  270. else
  271. conn = NULL;
  272. }
  273. return conn;
  274. }
  275. /*
  276. * Set the service connection reap timer.
  277. */
  278. static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
  279. unsigned long reap_at)
  280. {
  281. if (rxnet->live)
  282. timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
  283. }
  284. /*
  285. * Release a service connection
  286. */
  287. void rxrpc_put_service_conn(struct rxrpc_connection *conn)
  288. {
  289. const void *here = __builtin_return_address(0);
  290. unsigned int debug_id = conn->debug_id;
  291. int r;
  292. __refcount_dec(&conn->ref, &r);
  293. trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, r - 1, here);
  294. if (r - 1 == 1)
  295. rxrpc_set_service_reap_timer(conn->params.local->rxnet,
  296. jiffies + rxrpc_connection_expiry);
  297. }
/*
 * Destroy a virtual connection (RCU callback).
 *
 * Runs after a grace period following rxrpc_kill_connection(), so no new
 * lookups can reach the connection.  Releases everything the connection
 * pins: timer, queued packets, security state, key, bundle, peer and
 * local endpoint.
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	ASSERTCMP(refcount_read(&conn->ref), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	del_timer_sync(&conn->timer);
	/* Drain again: packets may have been queued since the drain in
	 * rxrpc_kill_connection().
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	rxrpc_put_bundle(conn->bundle);
	rxrpc_put_peer(conn->params.peer);

	/* Wake anyone in rxrpc_destroy_all_connections() waiting for the
	 * namespace's connection count to reach zero.
	 */
	if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
		wake_up_var(&conn->params.local->rxnet->nr_conns);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}
/*
 * Reap dead service connections.
 *
 * Walk the service connection list looking for connections whose only
 * remaining ref is the list's own (refcount == 1) and whose idle expiry
 * time has passed; move them to a local graveyard list and kill them
 * outside the lock.  Re-arms the reap timer for the earliest connection
 * not yet due.
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(refcount_read(&conn->ref), >, 0);
		/* More than the list's own ref means the conn is in use. */
		if (likely(refcount_read(&conn->ref) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		/* Idle-expiry checks only apply whilst the namespace and the
		 * local endpoint are live; otherwise everything unused is
		 * reaped immediately.
		 */
		if (rxnet->live && !conn->params.local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			/* Closed services get the shorter expiry period. */
			if (conn->params.local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { u=%d,t=%ld }",
			       conn->debug_id, refcount_read(&conn->ref),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				/* Not yet due: remember the soonest expiry so
				 * the timer can be re-armed below.
				 */
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (!refcount_dec_if_one(&conn->ref))
			continue;
		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	/* Kill the reaped connections now that conn_lock is dropped. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(refcount_read(&conn->ref), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}
/*
 * Preemptively destroy all the service connection records rather than
 * waiting for them to time out.  Called on namespace teardown; BUGs if
 * any service connection survives the final reaper pass.
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	/* NOTE(review): presumably balances a count held on behalf of the
	 * namespace itself — confirm against the rxnet setup code.
	 */
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	/* Force one final reaper pass and wait for it to finish. */
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	/* Anything still on the service list at this point has leaked. */
	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}