ar-internal.h

/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "protocol.h"

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8 x[FCRYPT_BSIZE];
		__be32 n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D) \
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;

/*
 * Mark applied to socket buffers.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_DATA, /* data message */
	RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
	RXRPC_SKB_MARK_BUSY, /* server busy message */
	RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
	RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
	RXRPC_SKB_MARK_NET_ERROR, /* network error message */
	RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
	RXRPC_SKB_MARK_NEW_CALL, /* new incoming call notification */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */
	RXRPC_CLIENT_BOUND, /* client local address bound */
	RXRPC_SERVER_BOUND, /* server local address bound */
	RXRPC_SERVER_BOUND2, /* second server local address bound */
	RXRPC_SERVER_LISTENING, /* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED, /* server listening disabled */
	RXRPC_CLOSE, /* socket is being closed */
};

/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry *proc_net; /* Subdir in /proc/net */
	u32 epoch; /* Local epoch for detecting local-end reset */
	struct list_head calls; /* List of calls active in this namespace */
	rwlock_t call_lock; /* Lock for ->calls */
	atomic_t nr_calls; /* Count of allocated calls */

	atomic_t nr_conns;
	struct list_head conn_proc_list; /* List of conns in this namespace for proc */
	struct list_head service_conns; /* Service conns in this namespace */
	rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct service_conn_reaper;
	struct timer_list service_conn_reap_timer;

	unsigned int nr_client_conns;
	unsigned int nr_active_client_conns;
	bool kill_all_client_conns;
	bool live;
	spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
	spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
	struct list_head waiting_client_conns;
	struct list_head active_client_conns;
	struct list_head idle_client_conns;
	struct work_struct client_conn_reaper;
	struct timer_list client_conn_reap_timer;

	struct list_head local_endpoints;
	struct mutex local_mutex; /* Lock for ->local_endpoints */

	DECLARE_HASHTABLE (peer_hash, 10);
	spinlock_t peer_hash_lock; /* Lock for ->peer_hash */
#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
	u8 peer_keepalive_cursor;
	time64_t peer_keepalive_base;
	struct list_head peer_keepalive[32];
	struct list_head peer_keepalive_new;
	struct timer_list peer_keepalive_timer;
	struct work_struct peer_keepalive_work;
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short peer_backlog_head;
	unsigned short peer_backlog_tail;
	unsigned short conn_backlog_head;
	unsigned short conn_backlog_tail;
	unsigned short call_backlog_head;
	unsigned short call_backlog_tail;
#define RXRPC_BACKLOG_MAX 32
	struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
};
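
/*
 * Illustrative sketch (not part of the original header): how one preallocated
 * call might be taken from the ring above.  The head index is where the next
 * preallocation is stored and the tail index is the next entry to consume;
 * both wrap modulo RXRPC_BACKLOG_MAX.  The function name is hypothetical, and
 * a real producer/consumer pair would also need the appropriate memory
 * barriers, which are omitted here.
 */
#if 0
static struct rxrpc_call *rxrpc_example_backlog_pop_call(struct rxrpc_backlog *b)
{
	unsigned short tail = b->call_backlog_tail;

	if (tail == b->call_backlog_head)
		return NULL; /* Ring is empty - nothing was preallocated */

	/* Consume one slot and advance the tail with wrap-around */
	b->call_backlog_tail = (tail + 1) & (RXRPC_BACKLOG_MAX - 1);
	return b->call_backlog[tail];
}
#endif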

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock sk;
	rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local *local; /* local endpoint */
	struct rxrpc_backlog *backlog; /* Preallocation for services */
	spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */
	struct list_head sock_calls; /* List of calls owned by this socket */
	struct list_head to_be_accepted; /* calls awaiting acceptance */
	struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */
	rwlock_t recvmsg_lock; /* Lock for recvmsg_q */
	struct key *key; /* security for this socket */
	struct key *securities; /* list of server security descriptors */
	struct rb_root calls; /* User ID -> call mapping */
	unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
	rwlock_t call_lock; /* lock for calls */
	u32 min_sec_level; /* minimum security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
	bool exclusive; /* Exclusive connection for a client socket */
	u16 second_service; /* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16 from; /* Service ID to upgrade (if not 0) */
		u16 to; /* service ID to upgrade to */
	} service_upgrade;
	sa_family_t family; /* Protocol family created with */
	struct sockaddr_rxrpc srx; /* Primary Service/local addresses */
	struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32 epoch; /* client boot timestamp */
	u32 cid; /* connection and channel ID */
	u32 callNumber; /* call ID (0 for connection-level packets) */
	u32 seq; /* sequence number of pkt in call stream */
	u32 serial; /* serial number of pkt sent to network */
	u8 type; /* packet type */
	u8 flags; /* packet flags */
	u8 userStatus; /* app-layer defined status */
	u8 securityIndex; /* security protocol ID */
	union {
		u16 _rsvd; /* reserved */
		u16 cksum; /* kerberos security checksum */
	};
	u16 serviceId; /* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	union {
		u8 nr_jumbo; /* Number of jumbo subpackets */
	};
	union {
		int remain; /* amount of space remaining for next write */
	};
	struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
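
/*
 * Illustrative sketch (not part of the original header): the 48-byte limit
 * noted above can be enforced at compile time; a check along these lines is
 * the sort of thing typically done once at module initialisation.
 */
#if 0
static inline void rxrpc_example_check_skb_priv_size(void)
{
	/* Fail the build if the private data no longer fits in sk_buff::cb */
	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
		     sizeof(((struct sk_buff *)0)->cb));
}
#endif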

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char *name; /* name of this service */
	u8 security_index; /* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data in a received packet that has been verified. */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head rcu;
	atomic_t usage;
	struct rxrpc_net *rxnet; /* The network ns in which this resides */
	struct list_head link;
	struct socket *socket; /* my UDP socket */
	struct work_struct processor;
	struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
	struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
	struct sk_buff_head reject_queue; /* packets awaiting rejection */
	struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
	struct rb_root client_conns; /* Client connections by socket params */
	spinlock_t client_conns_lock; /* Lock for client_conns */
	spinlock_t lock; /* access lock */
	rwlock_t services_lock; /* lock for services list */
	int debug_id; /* debug ID for printks */
	bool dead;
	bool service_closed; /* Service socket closed */
	struct sockaddr_rxrpc srx; /* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head rcu; /* This must be first */
	atomic_t usage;
	unsigned long hash_key;
	struct hlist_node hash_link;
	struct rxrpc_local *local;
	struct hlist_head error_targets; /* targets for net error distribution */
	struct work_struct error_distributor;
	struct rb_root service_conns; /* Service connections */
	struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
	time64_t last_tx_at; /* Last time packet sent here */
	seqlock_t service_conn_lock;
	spinlock_t lock; /* access lock */
	unsigned int if_mtu; /* interface MTU for this peer */
	unsigned int mtu; /* network MTU for this peer */
	unsigned int maxdata; /* data size (MTU - hdrsize) */
	unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
	int debug_id; /* debug ID for printks */
	int error_report; /* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
	struct sockaddr_rxrpc srx; /* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	ktime_t rtt_last_req; /* Time of last RTT request */
	u64 rtt; /* Current RTT estimate (in nS) */
	u64 rtt_sum; /* Sum of cache contents */
	u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
	u8 rtt_cursor; /* next entry at which to insert */
	u8 rtt_usage; /* amount of cache actually used */

	u8 cong_cwnd; /* Congestion window size */
};
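
/*
 * Illustrative sketch (not part of the original header): maintaining the
 * rolling RTT average from the fixed-size sample cache above.  Once the cache
 * is full, a new sample evicts the oldest entry, and peer->rtt is kept as the
 * mean of the samples currently held.  The function name is hypothetical and
 * div64_u64() is assumed to be available via <linux/math64.h>.
 */
#if 0
static void rxrpc_example_add_rtt_sample(struct rxrpc_peer *peer, u64 rtt_ns)
{
	if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
		peer->rtt_usage++;
	else
		peer->rtt_sum -= peer->rtt_cache[peer->rtt_cursor]; /* evict oldest */

	/* Store the new sample and advance the cursor with wrap-around */
	peer->rtt_cache[peer->rtt_cursor] = rtt_ns;
	peer->rtt_cursor = (peer->rtt_cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum += rtt_ns;
	peer->rtt = div64_u64(peer->rtt_sum, peer->rtt_usage);
}
#endif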

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32 epoch; /* epoch of this connection */
			u32 cid; /* connection ID */
		};
		u64 index_key;
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local *local; /* Representation of local endpoint */
	struct rxrpc_peer *peer; /* Remote endpoint */
	struct key *key; /* Security details */
	bool exclusive; /* T if conn is exclusive */
	bool upgrade; /* T if service ID can be upgraded */
	u16 service_id; /* Service ID for this connection */
	u32 security_level; /* Security level selected */
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
	RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */
	RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
	RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
	RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
	RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \
				   (1UL << RXRPC_CONN_FINAL_ACK_1) | \
				   (1UL << RXRPC_CONN_FINAL_ACK_2) | \
				   (1UL << RXRPC_CONN_FINAL_ACK_3))
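
/*
 * Illustrative sketch (not part of the original header): the four
 * RXRPC_CONN_FINAL_ACK_* flags are laid out consecutively so the flag for a
 * given channel can be addressed by offsetting from channel 0.  The helper
 * name here is hypothetical.
 */
#if 0
static inline bool rxrpc_example_need_final_ack(struct rxrpc_connection *conn,
						unsigned int channel)
{
	return test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
}
#endif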

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */
};

/*
 * The connection cache state.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
	RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
	RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
	RXRPC_CONN_CLIENT_UPGRADE, /* Conn is on active list, probing for upgrade */
	RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
	RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
	RXRPC_CONN__NR_CACHE_STATES
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED, /* Connection not yet attempted */
	RXRPC_CONN_CLIENT, /* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
	RXRPC_CONN_SERVICE, /* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto proto;
	struct rxrpc_conn_parameters params;

	atomic_t usage;
	struct rcu_head rcu;
	struct list_head cache_link;

	spinlock_t channel_lock;
	unsigned char active_chans; /* Mask of active channels */
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
	struct list_head waiting_calls; /* Calls waiting for channels */
	struct rxrpc_channel {
		unsigned long final_ack_at; /* Time at which to issue final ACK */
		struct rxrpc_call __rcu *call; /* Active call */
		unsigned int call_debug_id; /* call->debug_id */
		u32 call_id; /* ID of current call */
		u32 call_counter; /* Call ID counter */
		u32 last_call; /* ID of last call */
		u8 last_type; /* Type of last packet */
		union {
			u32 last_seq;
			u32 last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list timer; /* Conn event timer */
	struct work_struct processor; /* connection event processor */
	union {
		struct rb_node client_node; /* Node in local->client_conns */
		struct rb_node service_node; /* Node in peer->service_conns */
	};
	struct list_head proc_link; /* link in procfs list */
	struct list_head link; /* link in master connection list */
	struct sk_buff_head rx_queue; /* received conn-level packets */
	const struct rxrpc_security *security; /* applied security module */
	struct key *server_key; /* security for this service */
	struct crypto_skcipher *cipher; /* encryption handle */
	struct rxrpc_crypt csum_iv; /* packet checksum base */
	unsigned long flags;
	unsigned long events;
	unsigned long idle_timestamp; /* Time at which last became idle */
	spinlock_t state_lock; /* state-change lock */
	enum rxrpc_conn_cache_state cache_state;
	enum rxrpc_conn_proto_state state; /* current state of connection */
	u32 local_abort; /* local abort code */
	u32 remote_abort; /* remote abort code */
	int debug_id; /* debug ID for printks */
	atomic_t serial; /* packet serial number counter */
	unsigned int hi_serial; /* highest serial number received */
	u32 security_nonce; /* response re-use preventer */
	u16 service_id; /* Service ID, possibly upgraded */
	u8 size_align; /* data size alignment (for security) */
	u8 security_size; /* security header size */
	u8 security_ix; /* security type */
	u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
};

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID, /* has a user ID attached */
	RXRPC_CALL_IS_SERVICE, /* Call is service call */
	RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
	RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
	RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
	RXRPC_CALL_PINGING, /* Ping in process */
	RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
	RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK, /* need to generate ACK */
	RXRPC_CALL_EV_ABORT, /* need to generate abort */
	RXRPC_CALL_EV_RESEND, /* Tx resend required */
	RXRPC_CALL_EV_PING, /* Ping send required */
	RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
	RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
	RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE, /* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head rcu;
	struct rxrpc_connection *conn; /* connection carrying call */
	struct rxrpc_peer *peer; /* Peer record for remote address */
	struct rxrpc_sock __rcu *socket; /* socket responsible */
	struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
	struct mutex user_mutex; /* User access mutex */
	unsigned long ack_at; /* When deferred ACK needs to happen */
	unsigned long ack_lost_at; /* When ACK is figured as lost */
	unsigned long resend_at; /* When next resend needs to happen */
	unsigned long ping_at; /* When next to send a ping */
	unsigned long keepalive_at; /* When next to send a keepalive ping */
	unsigned long expect_rx_by; /* When we expect to get a packet by */
	unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
	unsigned long expect_term_by; /* When we expect call termination by */
	u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
	u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
	struct timer_list timer; /* Combined event timer */
	struct work_struct processor; /* Event processor */
	rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
	struct list_head link; /* link in master call list */
	struct list_head chan_wait_link; /* Link in conn->waiting_calls */
	struct hlist_node error_link; /* link in error distribution list */
	struct list_head accept_link; /* Link in rx->acceptq */
	struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
	struct list_head sock_link; /* Link in rx->sock_calls */
	struct rb_node sock_node; /* Node in rx->calls */
	struct sk_buff *tx_pending; /* Tx socket buffer being filled */
	wait_queue_head_t waitq; /* Wait queue for channel or Tx */
	s64 tx_total_len; /* Total length left to be transmitted (or -1) */
	__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
	unsigned long user_call_ID; /* user-defined call ID */
	unsigned long flags;
	unsigned long events;
	spinlock_t lock;
	spinlock_t notify_lock; /* Kernel notification lock */
	rwlock_t state_lock; /* lock for state transition */
	u32 abort_code; /* Local/remote abort code */
	int error; /* Local error incurred */
	enum rxrpc_call_state state; /* current state of call */
	enum rxrpc_call_completion completion; /* Call completion condition */
	atomic_t usage;
	u16 service_id; /* service ID */
	u8 security_ix; /* Security type */
	u32 call_id; /* call ID on connection */
	u32 cid; /* connection ID plus channel index */
	int debug_id; /* debug ID for printks */
	unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
	unsigned short rx_pkt_len; /* Current recvmsg packet len */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with 0 or the number of the
	 * segment of a jumbo packet each buffer refers to.  There can be up to
	 * 47 segments in a maximum-size UDP packet.
	 *
	 * In the Tx phase, packets are annotated with which buffers have been
	 * acked.
	 */
#define RXRPC_RXTX_BUFF_SIZE 64
#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 63
	struct sk_buff **rxtx_buffer;
	u8 *rxtx_annotations;
#define RXRPC_TX_ANNO_ACK 0
#define RXRPC_TX_ANNO_UNACK 1
#define RXRPC_TX_ANNO_NAK 2
#define RXRPC_TX_ANNO_RETRANS 3
#define RXRPC_TX_ANNO_MASK 0x03
#define RXRPC_TX_ANNO_LAST 0x04
#define RXRPC_TX_ANNO_RESENT 0x08
#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */
#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
	rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
				  * not hard-ACK'd packet follows this.
				  */
	rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
	u8 cong_cwnd; /* Congestion window size */
	u8 cong_extra; /* Extra to send for congestion management */
	u8 cong_ssthresh; /* Slow-start threshold */
	enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
	u8 cong_dup_acks; /* Count of ACKs showing missing packets */
	u8 cong_cumul_acks; /* Cumulative ACK count */
	ktime_t cong_tstamp; /* Last time cwnd was changed */

	rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
				  * consumed packet follows this.
				  */
	rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
	rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
	rxrpc_serial_t rx_serial; /* Highest serial received for this call */
	u8 rx_winsize; /* Size of Rx window */
	u8 tx_winsize; /* Maximum size of Tx window */
	bool tx_phase; /* T if transmission phase, F if receive phase */
	u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */

	/* receive-phase ACK management */
	u8 ackr_reason; /* reason to ACK */
	u16 ackr_skew; /* skew on packet being ACK'd */
	rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
	rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
	rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
	rxrpc_seq_t ackr_seen; /* Highest packet shown seen */

	/* ping management */
	rxrpc_serial_t ping_serial; /* Last ping sent */
	ktime_t ping_time; /* Time last ping sent */

	/* transmission-phase ACK management */
	ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
	rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
	rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
	rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
	rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
};
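
/*
 * Illustrative sketch (not part of the original header): decoding one Rx-phase
 * annotation byte from the rxtx_annotations array described in struct
 * rxrpc_call above.  Buffer slots are indexed by the sequence number masked
 * with RXRPC_RXTX_BUFF_MASK; the low six bits carry the jumbo subpacket number
 * plus one (or 0 for a non-jumbo packet) and the top two bits flag the last
 * jumbo element and whether the packet has been verified/decrypted.  The
 * function name is hypothetical.
 */
#if 0
static void rxrpc_example_decode_rx_anno(struct rxrpc_call *call, rxrpc_seq_t seq)
{
	int ix = seq & RXRPC_RXTX_BUFF_MASK;
	u8 anno = call->rxtx_annotations[ix];
	u8 jumbo = anno & RXRPC_RX_ANNO_JUMBO;		/* subpacket number + 1, or 0 */
	bool last_jumbo = anno & RXRPC_RX_ANNO_JLAST;	/* last element of a jumbo packet */
	bool verified = anno & RXRPC_RX_ANNO_VERIFIED;	/* already verified and decrypted */

	pr_info("seq=%08x jumbo=%u jlast=%d verified=%d\n",
		seq, jumbo, last_jumbo, verified);
}
#endif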

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u8 ack_reason;
	u8 nr_acks; /* Number of ACKs in packet */
	u8 nr_nacks; /* Number of NACKs in packet */
	u8 nr_new_acks; /* Number of new ACKs in packet */
	u8 nr_new_nacks; /* Number of new NACKs in packet */
	u8 nr_rot_new_acks; /* Number of rotated new ACKs */
	bool new_low_nack; /* T if new low NACK found */
	bool retrans_timeo; /* T if reTx due to timeout happened */
	u8 flight_size; /* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode mode:8;
	u8 cwnd;
	u8 ssthresh;
	u8 dup_acks;
	u8 cumulative_acks;
};

/*
 * sendmsg() cmsg-specified parameters.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA, /* send data message */
	RXRPC_CMD_SEND_ABORT, /* request abort generation */
	RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
};

struct rxrpc_call_params {
	s64 tx_total_len; /* Total Tx data length (if send data) */
	unsigned long user_call_ID; /* User's call ID */
	struct {
		u32 hard; /* Maximum lifetime (sec) */
		u32 idle; /* Max time since last data packet (msec) */
		u32 normal; /* Max time since last call packet (msec) */
	} timeouts;
	u8 nr_timeouts; /* Number of timeouts specified */
};

struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32 abort_code; /* Abort code to Tx (if abort) */
	enum rxrpc_command command : 8; /* The command to implement */
	bool exclusive; /* Shared or exclusive call */
	bool upgrade; /* If the connection is upgradeable */
};

#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_connection *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
				     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);

static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
					   unsigned long expire_at,
					   unsigned long now,
					   enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
int rxrpc_retry_client_call(struct rxrpc_sock *,
			    struct rxrpc_call *,
			    struct rxrpc_conn_parameters *,
			    struct sockaddr_rxrpc *,
			    gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * Transition a call to the complete state.
 */
static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
					       enum rxrpc_call_completion compl,
					       u32 abort_code,
					       int error)
{
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->error = error;
		call->completion = compl;
		call->state = RXRPC_CALL_COMPLETE;
		trace_rxrpc_call_complete(call);
		wake_up(&call->waitq);
		return true;
	}
	return false;
}

static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
					     enum rxrpc_call_completion compl,
					     u32 abort_code,
					     int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call successfully completed.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

static inline bool rxrpc_call_completed(struct rxrpc_call *call)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_call_completed(call);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call is locally aborted.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}

static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				    rxrpc_seq_t seq, u32 abort_code, int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Abort a call due to a protocol error.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
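
/*
 * Illustrative sketch (not part of the original header): how a packet
 * verification path would typically invoke the macro above.  The tracepoint
 * string, the short "why" tag, the abort code and the function name are all
 * hypothetical example values, not taken from the real callers.
 */
#if 0
static bool rxrpc_example_check_len(struct rxrpc_call *call, struct sk_buff *skb,
				    unsigned int len)
{
	if (len < 4) {
		rxrpc_abort_eproto(call, skb, "example_short_packet", "LEN",
				   RX_PROTOCOL_ERROR);
		return false;
	}
	return true;
}
#endif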

/*
 * conn_client.c
 */
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
		       struct sockaddr_rxrpc *, gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *,
				   struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_requested_ack_delay;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned long rxrpc_resend_timeout;

extern const s8 rxrpc_ack_priority[];

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
void rxrpc_send_keepalive(struct rxrpc_peer *);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
					      struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
void __rxrpc_queue_peer_error(struct rxrpc_peer *);

/*
 * proc.c
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *,
				struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}

static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}

static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}

static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
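
/*
 * Illustrative note (not part of the original header): the helpers above
 * compare sequence numbers by signed 32-bit difference so that ordering stays
 * correct across wrap-around, provided the two values are less than 2^31
 * apart.  Worked example:
 *
 *	before(0xfffffffe, 0x00000001) is true, since
 *	(s32)(0xfffffffe - 0x00000001) == (s32)0xfffffffd < 0,
 *	even though 0xfffffffe > 0x00000001 as unsigned values;
 *
 *	after(0x00000001, 0xfffffffe) is true, since
 *	(s32)(0x00000001 - 0xfffffffe) == 3 > 0.
 */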

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...) knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...) \
do { \
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
		kenter(FMT,##__VA_ARGS__); \
} while (0)

#define _leave(FMT,...) \
do { \
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
		kleave(FMT,##__VA_ARGS__); \
} while (0)

#define _debug(FMT,...) \
do { \
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
		kdebug(FMT,##__VA_ARGS__); \
} while (0)

#define _proto(FMT,...) \
do { \
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
		kproto(FMT,##__VA_ARGS__); \
} while (0)

#define _net(FMT,...) \
do { \
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \
		knet(FMT,##__VA_ARGS__); \
} while (0)

#else
#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X) \
do { \
	if (unlikely(!(X))) { \
		pr_err("Assertion failed\n"); \
		BUG(); \
	} \
} while (0)

#define ASSERTCMP(X, OP, Y) \
do { \
	__typeof__(X) _x = (X); \
	__typeof__(Y) _y = (__typeof__(X))(Y); \
	if (unlikely(!(_x OP _y))) { \
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP, \
		       (unsigned long)_y, (unsigned long)_y); \
		BUG(); \
	} \
} while (0)

#define ASSERTIF(C, X) \
do { \
	if (unlikely((C) && !(X))) { \
		pr_err("Assertion failed\n"); \
		BUG(); \
	} \
} while (0)

#define ASSERTIFCMP(C, X, OP, Y) \
do { \
	__typeof__(X) _x = (X); \
	__typeof__(Y) _y = (__typeof__(X))(Y); \
	if (unlikely((C) && !(_x OP _y))) { \
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP, \
		       (unsigned long)_y, (unsigned long)_y); \
		BUG(); \
	} \
} while (0)

#else

#define ASSERT(X) \
do { \
} while (0)

#define ASSERTCMP(X, OP, Y) \
do { \
} while (0)

#define ASSERTIF(C, X) \
do { \
} while (0)

#define ASSERTIFCMP(C, X, OP, Y) \
do { \
} while (0)

#endif /* __KDEBUGALL */