ar-internal.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <linux/win_minmax.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
#include "protocol.h"

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct key_preparsed_payload;
struct rxrpc_connection;

/*
 * Mark applied to socket buffers in skb->mark.  skb->priority is used
 * to pass supplementary information.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT (code in skb->priority) */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,	/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,	/* client local address bound */
	RXRPC_SERVER_BOUND,	/* server local address bound */
	RXRPC_SERVER_BOUND2,	/* second server local address bound */
	RXRPC_SERVER_LISTENING,	/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,	/* socket is being closed */
};

/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry *proc_net;	/* Subdir in /proc/net */
	u32 epoch;	/* Local epoch for detecting local-end reset */
	struct list_head calls;	/* List of calls active in this namespace */
	spinlock_t call_lock;	/* Lock for ->calls */
	atomic_t nr_calls;	/* Count of allocated calls */
	atomic_t nr_conns;
	struct list_head conn_proc_list;	/* List of conns in this namespace for proc */
	struct list_head service_conns;	/* Service conns in this namespace */
	rwlock_t conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct service_conn_reaper;
	struct timer_list service_conn_reap_timer;
	bool live;
	bool kill_all_client_conns;
	atomic_t nr_client_conns;
	spinlock_t client_conn_cache_lock;	/* Lock for ->*_client_conns */
	spinlock_t client_conn_discard_lock;	/* Prevent multiple discarders */
	struct list_head idle_client_conns;
	struct work_struct client_conn_reaper;
	struct timer_list client_conn_reap_timer;
	struct hlist_head local_endpoints;
	struct mutex local_mutex;	/* Lock for ->local_endpoints */
	DECLARE_HASHTABLE(peer_hash, 10);
	spinlock_t peer_hash_lock;	/* Lock for ->peer_hash */
#define RXRPC_KEEPALIVE_TIME 20	/* NAT keepalive time in seconds */
	u8 peer_keepalive_cursor;
	time64_t peer_keepalive_base;
	struct list_head peer_keepalive[32];
	struct list_head peer_keepalive_new;
	struct timer_list peer_keepalive_timer;
	struct work_struct peer_keepalive_work;
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short peer_backlog_head;
	unsigned short peer_backlog_tail;
	unsigned short conn_backlog_head;
	unsigned short conn_backlog_tail;
	unsigned short call_backlog_head;
	unsigned short call_backlog_tail;
#define RXRPC_BACKLOG_MAX 32
	struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
};
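
/*
 * Illustrative sketch (not part of the original header): each backlog array
 * is used as a simple ring, with the producer advancing the head and the
 * consumer advancing the tail, both modulo RXRPC_BACKLOG_MAX.  A consumer in
 * the data_ready path might take a preallocated call roughly like this
 * (names and helpers hypothetical):
 *
 *	unsigned short head = b->call_backlog_head;
 *	unsigned short tail = b->call_backlog_tail;
 *
 *	if (CIRC_CNT(head, tail, RXRPC_BACKLOG_MAX) == 0)
 *		return NULL;		// nothing preallocated, drop the packet
 *	call = b->call_backlog[tail];
 *	smp_store_release(&b->call_backlog_tail,
 *			  (tail + 1) & (RXRPC_BACKLOG_MAX - 1));
 */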

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock sk;
	rxrpc_notify_new_call_t notify_new_call;	/* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call;	/* Func to discard a new call */
	struct rxrpc_local *local;	/* local endpoint */
	struct rxrpc_backlog *backlog;	/* Preallocation for services */
	spinlock_t incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head sock_calls;	/* List of calls owned by this socket */
	struct list_head to_be_accepted;	/* calls awaiting acceptance */
	struct list_head recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t recvmsg_lock;	/* Lock for recvmsg_q */
	struct key *key;	/* security for this socket */
	struct key *securities;	/* list of server security descriptors */
	struct rb_root calls;	/* User ID -> call mapping */
	unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0	/* connect_srx is set */
	rwlock_t call_lock;	/* lock for calls */
	u32 min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
	bool exclusive;	/* Exclusive connection for a client socket */
	u16 second_service;	/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16 from;	/* Service ID to upgrade (if not 0) */
		u16 to;	/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t family;	/* Protocol family created with */
	struct sockaddr_rxrpc srx;	/* Primary Service/local addresses */
	struct sockaddr_rxrpc connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32 epoch;	/* client boot timestamp */
	u32 cid;	/* connection and channel ID */
	u32 callNumber;	/* call ID (0 for connection-level packets) */
	u32 seq;	/* sequence number of pkt in call stream */
	u32 serial;	/* serial number of pkt sent to network */
	u8 type;	/* packet type */
	u8 flags;	/* packet flags */
	u8 userStatus;	/* app-layer defined status */
	u8 securityIndex;	/* security protocol ID */
	union {
		u16 _rsvd;	/* reserved */
		u16 cksum;	/* kerberos security checksum */
	};
	u16 serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	atomic_t nr_ring_pins;	/* Number of rxtx ring pins */
	u8 nr_subpackets;	/* Number of subpackets */
	u8 rx_flags;	/* Received packet flags */
#define RXRPC_SKB_INCL_LAST	0x01	/* - Includes last packet */
#define RXRPC_SKB_TX_BUFFER	0x02	/* - Is transmit buffer */
	union {
		int remain;	/* amount of space remaining for next write */
		/* List of requested ACKs on subpackets */
		unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
					 BITS_PER_LONG];
	};
	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
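
/*
 * Illustrative note (not part of the original header): rxrpc_skb_priv is
 * overlaid on the 48-byte sk_buff control buffer, so it must never grow past
 * that.  A build using this layout could enforce the limit with a
 * compile-time assertion along these lines:
 *
 *	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
 *		     sizeof_field(struct sk_buff, cb));
 */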

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char *name;	/* name of this service */
	u8 security_index;	/* security type provided */
	u32 no_key_abort;	/* Abort code indicating no key */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* Parse the information from a server key */
	int (*preparse_server_key)(struct key_preparsed_payload *);

	/* Clean up the preparse buffer after parsing a server key */
	void (*free_preparse_server_key)(struct key_preparsed_payload *);

	/* Destroy the payload of a server key */
	void (*destroy_server_key)(struct key *);

	/* Describe a server key */
	void (*describe_server_key)(const struct key *, struct seq_file *);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *,
					struct rxrpc_key_token *);

	/* Work out how much data we can store in a packet, given an estimate
	 * of the amount of data remaining.
	 */
	int (*how_much_data)(struct rxrpc_call *, size_t,
			     size_t *, size_t *, size_t *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *, struct sk_buff *, size_t);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Free crypto request on a call */
	void (*free_call_crypto)(struct rxrpc_call *);

	/* Locate the data in a received packet that has been verified. */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head rcu;
	atomic_t active_users;	/* Number of users of the local endpoint */
	refcount_t ref;	/* Number of references to the structure */
	struct rxrpc_net *rxnet;	/* The network ns in which this resides */
	struct hlist_node link;
	struct socket *socket;	/* my UDP socket */
	struct work_struct processor;
	struct rxrpc_sock __rcu *service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head event_queue;	/* endpoint event packets awaiting processing */
	struct rb_root client_bundles;	/* Client connection bundles by socket params */
	spinlock_t client_bundles_lock;	/* Lock for client_bundles */
	spinlock_t lock;	/* access lock */
	rwlock_t services_lock;	/* lock for services list */
	int debug_id;	/* debug ID for printks */
	bool dead;
	bool service_closed;	/* Service socket closed */
	struct sockaddr_rxrpc srx;	/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head rcu;	/* This must be first */
	refcount_t ref;
	unsigned long hash_key;
	struct hlist_node hash_link;
	struct rxrpc_local *local;
	struct hlist_head error_targets;	/* targets for net error distribution */
	struct rb_root service_conns;	/* Service connections */
	struct list_head keepalive_link;	/* Link in net->peer_keepalive[] */
	time64_t last_tx_at;	/* Last time packet sent here */
	seqlock_t service_conn_lock;
	spinlock_t lock;	/* access lock */
	unsigned int if_mtu;	/* interface MTU for this peer */
	unsigned int mtu;	/* network MTU for this peer */
	unsigned int maxdata;	/* data size (MTU - hdrsize) */
	unsigned short hdrsize;	/* header size (IP + UDP + RxRPC) */
	int debug_id;	/* debug ID for printks */
	struct sockaddr_rxrpc srx;	/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t rtt_input_lock;	/* RTT lock for input routine */
	ktime_t rtt_last_req;	/* Time of last RTT request */
	unsigned int rtt_count;	/* Number of samples we've got */
	u32 srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32 mdev_us;	/* medium deviation */
	u32 mdev_max_us;	/* maximal mdev for the last rtt period */
	u32 rttvar_us;	/* smoothed mdev_max */
	u32 rto_j;	/* Retransmission timeout in jiffies */
	u8 backoff;	/* Backoff timeout */
	u8 cong_cwnd;	/* Congestion window size */
};

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32 epoch;	/* epoch of this connection */
			u32 cid;	/* connection ID */
		};
		u64 index_key;
	};
};
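
/*
 * Illustrative note (not from the original header): the anonymous union above
 * lets { epoch, cid } be treated as one 64-bit lookup key, so rbtree or hash
 * comparisons can be done in a single operation rather than two, e.g.
 * (a sketch, names assumed):
 *
 *	struct rxrpc_conn_proto k;
 *
 *	k.epoch = sp->hdr.epoch;
 *	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
 *	if (k.index_key < conn->proto.index_key)
 *		...		// walk left in the tree
 *
 * The byte layout of index_key differs between big- and little-endian
 * machines, but it is only ever compared against keys built the same way.
 */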
struct rxrpc_conn_parameters {
	struct rxrpc_local *local;	/* Representation of local endpoint */
	struct rxrpc_peer *peer;	/* Remote endpoint */
	struct key *key;	/* Security details */
	bool exclusive;	/* T if conn is exclusive */
	bool upgrade;	/* T if service ID can be upgraded */
	u16 service_id;	/* Service ID for this connection */
	u32 security_level;	/* Security level selected */
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,	/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_DONT_REUSE,	/* Don't reuse this connection */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,	/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,	/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,	/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,	/* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,	/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,	/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,	/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC client connection bundle.
 */
struct rxrpc_bundle {
	struct rxrpc_conn_parameters params;
	refcount_t ref;
	atomic_t active;	/* Number of active users */
	unsigned int debug_id;
	bool try_upgrade;	/* True if the bundle is attempting upgrade */
	bool alloc_conn;	/* True if someone's getting a conn */
	short alloc_error;	/* Error from last conn allocation */
	spinlock_t channel_lock;
	struct rb_node local_node;	/* Node in local->client_conns */
	struct list_head waiting_calls;	/* Calls waiting for channels */
	unsigned long avail_chans;	/* Mask of available channels */
	struct rxrpc_connection *conns[4];	/* The connections in the bundle (max 4) */
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto proto;
	struct rxrpc_conn_parameters params;
	refcount_t ref;
	struct rcu_head rcu;
	struct list_head cache_link;
	unsigned char act_chans;	/* Mask of active channels */
	struct rxrpc_channel {
		unsigned long final_ack_at;	/* Time at which to issue final ACK */
		struct rxrpc_call __rcu *call;	/* Active call */
		unsigned int call_debug_id;	/* call->debug_id */
		u32 call_id;	/* ID of current call */
		u32 call_counter;	/* Call ID counter */
		u32 last_call;	/* ID of last call */
		u8 last_type;	/* Type of last packet */
		union {
			u32 last_seq;
			u32 last_abort;
		};
	} channels[RXRPC_MAXCALLS];
	struct timer_list timer;	/* Conn event timer */
	struct work_struct processor;	/* connection event processor */
	struct rxrpc_bundle *bundle;	/* Client connection bundle */
	struct rb_node service_node;	/* Node in peer->service_conns */
	struct list_head proc_link;	/* link in procfs list */
	struct list_head link;	/* link in master connection list */
	struct sk_buff_head rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	union {
		struct {
			struct crypto_sync_skcipher *cipher;	/* encryption handle */
			struct rxrpc_crypt csum_iv;	/* packet checksum base */
			u32 nonce;	/* response re-use preventer */
		} rxkad;
	};
	unsigned long flags;
	unsigned long events;
	unsigned long idle_timestamp;	/* Time at which last became idle */
	spinlock_t state_lock;	/* state-change lock */
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32 abort_code;	/* Abort code of connection abort */
	int debug_id;	/* debug ID for printks */
	atomic_t serial;	/* packet serial number counter */
	unsigned int hi_serial;	/* highest serial number received */
	u32 service_id;	/* Service ID, possibly upgraded */
	u8 security_ix;	/* security type */
	u8 out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
	u8 bundle_shift;	/* Index into bundle->avail_chans */
	short error;	/* Local error code */
};

static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
{
	return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
{
	return !rxrpc_to_server(sp);
}

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,	/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,	/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,	/* Call is service call */
	RXRPC_CALL_EXPOSED,	/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,	/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,	/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_SEND_PING,	/* A ping will need to be sent */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD,	/* The peer responded at least once to this call */
	RXRPC_CALL_RX_UNDERRUN,	/* Got data underrun */
	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
	RXRPC_CALL_KERNEL,	/* The call was made by the kernel */
	RXRPC_CALL_UPGRADE,	/* Service upgrade was requested for the call */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,	/* need to generate ACK */
	RXRPC_CALL_EV_ABORT,	/* need to generate abort */
	RXRPC_CALL_EV_RESEND,	/* Tx resend required */
	RXRPC_CALL_EV_PING,	/* Ping send required */
	RXRPC_CALL_EV_EXPIRED,	/* Expiry occurred */
	RXRPC_CALL_EV_ACK_LOST,	/* ACK may be lost, send ping */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,	/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call completion condition (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,	/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,	/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head rcu;
	struct rxrpc_connection *conn;	/* connection carrying call */
	struct rxrpc_peer *peer;	/* Peer record for remote address */
	struct rxrpc_sock __rcu *socket;	/* socket responsible */
	struct rxrpc_net *rxnet;	/* Network namespace to which call belongs */
	const struct rxrpc_security *security;	/* applied security module */
	struct mutex user_mutex;	/* User access mutex */
	unsigned long ack_at;	/* When deferred ACK needs to happen */
	unsigned long ack_lost_at;	/* When ACK is figured as lost */
	unsigned long resend_at;	/* When next resend needs to happen */
	unsigned long ping_at;	/* When next to send a ping */
	unsigned long keepalive_at;	/* When next to send a keepalive ping */
	unsigned long expect_rx_by;	/* When we expect to get a packet by */
	unsigned long expect_req_by;	/* When we expect to get a request DATA packet by */
	unsigned long expect_term_by;	/* When we expect call termination by */
	u32 next_rx_timo;	/* Timeout for next Rx packet (jif) */
	u32 next_req_timo;	/* Timeout for next Rx request packet (jif) */
	struct skcipher_request *cipher_req;	/* Packet cipher request buffer */
	struct timer_list timer;	/* Combined event timer */
	struct work_struct processor;	/* Event processor */
	rxrpc_notify_rx_t notify_rx;	/* kernel service Rx notification function */
	struct list_head link;	/* link in master call list */
	struct list_head chan_wait_link;	/* Link in conn->bundle->waiting_calls */
	struct hlist_node error_link;	/* link in error distribution list */
	struct list_head accept_link;	/* Link in rx->acceptq */
	struct list_head recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head sock_link;	/* Link in rx->sock_calls */
	struct rb_node sock_node;	/* Node in rx->calls */
	struct sk_buff *tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t waitq;	/* Wait queue for channel or Tx */
	s64 tx_total_len;	/* Total length left to be transmitted (or -1) */
	__be32 crypto_buf[2];	/* Temporary packet crypto buffer */
	unsigned long user_call_ID;	/* user-defined call ID */
	unsigned long flags;
	unsigned long events;
	spinlock_t lock;
	spinlock_t notify_lock;	/* Kernel notification lock */
	rwlock_t state_lock;	/* lock for state transition */
	u32 abort_code;	/* Local/remote abort code */
	int error;	/* Local error incurred */
	enum rxrpc_call_state state;	/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	refcount_t ref;
	u16 service_id;	/* service ID */
	u8 security_ix;	/* Security type */
	enum rxrpc_interruptibility interruptibility;	/* At what point call may be interrupted */
	u32 call_id;	/* call ID on connection */
	u32 cid;	/* connection ID plus channel index */
	int debug_id;	/* debug ID for printks */
	unsigned short rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short rx_pkt_len;	/* Current recvmsg packet len */
	bool rx_pkt_last;	/* Current recvmsg packet is last */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with 0 or the number of the
	 * segment of a jumbo packet each buffer refers to.  There can be up to
	 * 47 segments in a maximum-size UDP packet.
	 *
	 * In the Tx phase, packets are annotated with which buffers have been
	 * acked.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 63
	struct sk_buff **rxtx_buffer;
	u8 *rxtx_annotations;
#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08
#define RXRPC_RX_ANNO_SUBPACKET	0x3f	/* Subpacket number in jumbogram */
#define RXRPC_RX_ANNO_VERIFIED	0x80	/* Set if verified and decrypted */
	rxrpc_seq_t tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
					 * not hard-ACK'd packet follows this.
					 */
	rxrpc_seq_t tx_top;	/* Highest Tx slot allocated. */
	u16 tx_backoff;	/* Delay to insert due to Tx failure */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS	RXRPC_JUMBO_DATALEN
	u8 cong_cwnd;	/* Congestion window size */
	u8 cong_extra;	/* Extra to send for congestion management */
	u8 cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode cong_mode:8;	/* Congestion management mode */
	u8 cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8 cong_cumul_acks;	/* Cumulative ACK count */
	ktime_t cong_tstamp;	/* Last time cwnd was changed */

	rxrpc_seq_t rx_hard_ack;	/* Dead slot in buffer; the first received but not
					 * consumed packet follows this.
					 */
	rxrpc_seq_t rx_top;	/* Highest Rx slot allocated. */
	rxrpc_seq_t rx_expect_next;	/* Expected next packet sequence number */
	rxrpc_serial_t rx_serial;	/* Highest serial received for this call */
	u8 rx_winsize;	/* Size of Rx window */
	u8 tx_winsize;	/* Maximum size of Tx window */
	bool tx_phase;	/* T if transmission phase, F if receive phase */
	u8 nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */
	spinlock_t input_lock;	/* Lock for packet input to this call */

	/* Receive-phase ACK management (ACKs we send). */
	u8 ackr_reason;	/* reason to ACK */
	rxrpc_serial_t ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_seq_t ackr_highest_seq;	/* Highest sequence number received */
	atomic_t ackr_nr_unacked;	/* Number of unacked packets */
	atomic_t ackr_nr_consumed;	/* Number of packets needing hard ACK */

	/* RTT management */
	rxrpc_serial_t rtt_serial[4];	/* Serial number of DATA or PING sent */
	ktime_t rtt_sent_at[4];	/* Time packet sent */
	unsigned long rtt_avail;	/* Mask of available slots in bits 0-3,
					 * Mask of pending samples in 8-11 */
#define RXRPC_CALL_RTT_AVAIL_MASK	0xf
#define RXRPC_CALL_RTT_PEND_SHIFT	8

	/* Transmission-phase ACK management (ACKs we've received). */
	ktime_t acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_seq_t acks_first_seq;	/* first sequence number received */
	rxrpc_seq_t acks_prev_seq;	/* Highest previousPacket received */
	rxrpc_seq_t acks_lowest_nak;	/* Lowest NACK in the buffer (or ==tx_hard_ack) */
	rxrpc_seq_t acks_lost_top;	/* tx_top at the time lost-ack ping sent */
	rxrpc_serial_t acks_lost_ping;	/* Serial number of probe ACK */
};
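
/*
 * Illustrative sketch (not part of the original header): rxtx_buffer is a
 * power-of-two ring indexed by masked sequence number, with hard_ack and top
 * acting as consumer/producer pointers.  The slot for sequence number seq
 * would typically be reached as:
 *
 *	int ix = seq & RXRPC_RXTX_BUFF_MASK;
 *	struct sk_buff *skb = call->rxtx_buffer[ix];
 *	u8 annotation = call->rxtx_annotations[ix];
 *
 * and "seq is still live in the ring" corresponds to
 * after(seq, hard_ack) && before_eq(seq, top), using the wrap-safe sequence
 * helpers defined near the end of this file.
 */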

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u8 ack_reason;
	u8 nr_acks;	/* Number of ACKs in packet */
	u8 nr_nacks;	/* Number of NACKs in packet */
	u8 nr_new_acks;	/* Number of new ACKs in packet */
	u8 nr_new_nacks;	/* Number of new NACKs in packet */
	u8 nr_rot_new_acks;	/* Number of rotated new ACKs */
	bool new_low_nack;	/* T if new low NACK found */
	bool retrans_timeo;	/* T if reTx due to timeout happened */
	u8 flight_size;	/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode mode:8;
	u8 cwnd;
	u8 ssthresh;
	u8 dup_acks;
	u8 cumulative_acks;
};

/*
 * sendmsg() cmsg-specified parameters.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,	/* send data message */
	RXRPC_CMD_SEND_ABORT,	/* request abort generation */
	RXRPC_CMD_REJECT_BUSY,	/* [server] reject a call as busy */
	RXRPC_CMD_CHARGE_ACCEPT,	/* [server] charge accept preallocation */
};

struct rxrpc_call_params {
	s64 tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long user_call_ID;	/* User's call ID */
	struct {
		u32 hard;	/* Maximum lifetime (sec) */
		u32 idle;	/* Max time since last data packet (msec) */
		u32 normal;	/* Max time since last call packet (msec) */
	} timeouts;
	u8 nr_timeouts;	/* Number of timeouts specified */
	bool kernel;	/* T if kernel is making the call */
	enum rxrpc_interruptibility interruptibility;	/* How interruptible is the call? */
};

struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32 abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command command : 8;	/* The command to implement */
	bool exclusive;	/* Shared or exclusive call */
	bool upgrade;	/* If the connection is upgradeable */
};

#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_sock *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);

/*
 * call_event.c
 */
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why);
void rxrpc_delete_call_timer(struct rxrpc_call *call);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern struct kmem_cache *rxrpc_call_jar;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * conn_client.c
 */
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *);
void rxrpc_put_bundle(struct rxrpc_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
		       struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
		       gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *,
						   struct rxrpc_peer **);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
				   const struct rxrpc_security *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
int rxrpc_input_packet(struct sock *, struct sk_buff *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;

int rxrpc_request_key(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
{
	return atomic_dec_return(&local->active_users) == 0;
}

static inline bool __rxrpc_use_local(struct rxrpc_local *local)
{
	return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
}
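
/*
 * Illustrative note (not from the original header): atomic_fetch_add_unless()
 * only performs the increment if active_users is currently non-zero, so
 * __rxrpc_use_local() returns false rather than reviving an endpoint whose
 * last user has already gone away.  A caller might use it like this (sketch):
 *
 *	if (!__rxrpc_use_local(local))
 *		return NULL;	// endpoint already fully unused; do not resurrect it
 */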

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_requested_ack_delay;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern const s8 rxrpc_ack_priority[];

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
void rxrpc_send_keepalive(struct rxrpc_peer *);

/*
 * peer_event.c
 */
void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
void rxrpc_error_report(struct sock *);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
			     struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
void rxrpc_put_peer_locked(struct rxrpc_peer *);

/*
 * proc.c
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;
extern const struct seq_operations rxrpc_local_seq_ops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
bool __rxrpc_call_completed(struct rxrpc_call *);
bool rxrpc_call_completed(struct rxrpc_call *);
bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * Abort a call due to a protocol error.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
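
/*
 * Illustrative usage (hypothetical check and strings, not from the original
 * header): a caller that finds a malformed packet might do
 *
 *	if (len < sizeof(struct some_wire_hdr))		// some_wire_hdr is made up
 *		return rxrpc_abort_eproto(call, skb, "short_hdr", "HDR",
 *					  RX_PROTOCOL_ERROR);
 *
 * The first string becomes the tracepoint string recorded by the rx_eproto
 * trace event; the second is the short "why" tag passed on to
 * rxrpc_abort_call().
 */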

/*
 * rtt.c
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
const struct rxrpc_security *rxrpc_security_lookup(u8);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *,
							  struct sk_buff *);
struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
					  struct sk_buff *, u32, u32);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * server_key.c
 */
extern struct key_type key_type_rxrpc_s;

int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}

static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}

static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}

static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
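
/*
 * Illustrative examples (not from the original header): the cast to s32 makes
 * these comparisons wrap-safe for serial/sequence numbers, e.g.:
 *
 *	before(0x00000001, 0x00000002) == true
 *	before(0xfffffffe, 0x00000001) == true	(0xfffffffe is "just before" 1 after wrap)
 *	after(0x00000001, 0xfffffffe)  == true
 *
 * They only give meaningful answers when the two values are within 2^31 of
 * each other, which holds for the windows rxrpc uses.
 */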

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))		\
		kenter(FMT,##__VA_ARGS__);			\
} while (0)

#define _leave(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))		\
		kleave(FMT,##__VA_ARGS__);			\
} while (0)

#define _debug(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))		\
		kdebug(FMT,##__VA_ARGS__);			\
} while (0)

#define _proto(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))		\
		kproto(FMT,##__VA_ARGS__);			\
} while (0)

#define _net(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))		\
		knet(FMT,##__VA_ARGS__);			\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
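
/*
 * Illustrative usage (not from the original header): unless __KDEBUG or
 * CONFIG_AF_RXRPC_DEBUG is enabled, these macros compile down to no_printk()
 * and cost nothing, so they can be left in hot paths, e.g.:
 *
 *	_enter("%d,%lx", call->debug_id, user_call_ID);
 *	...
 *	_leave(" = %d", ret);
 */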

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */