/* ar-internal.h */
  1. /* AF_RXRPC internal definitions
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/atomic.h>
  12. #include <linux/seqlock.h>
  13. #include <net/net_namespace.h>
  14. #include <net/netns/generic.h>
  15. #include <net/sock.h>
  16. #include <net/af_rxrpc.h>
  17. #include "protocol.h"
#if 0
/* Debug aid: BUG if the atomic usage counter's top bytes look like the slab
 * free-poison pattern, i.e. the object was used after being freed.
 * Compiled out by default.
 */
#define CHECK_SLAB_OKAY(X) \
        BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
               (POISON_FREE << 8 | POISON_FREE))
#else
/* Debug check disabled: expands to a no-op statement. */
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
#define FCRYPT_BSIZE 8          /* fcrypt cipher block size in bytes */

/*
 * One fcrypt cipher block, viewable either as raw bytes or as two
 * big-endian 32-bit words.  8-byte aligned for the crypto layer.
 */
struct rxrpc_crypt {
        union {
                u8      x[FCRYPT_BSIZE];
                __be32  n[2];
        };
} __attribute__((aligned(8)));
/* Queue work on rxrpc's private workqueue rather than the system one. */
#define rxrpc_queue_work(WS)    queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)  \
        queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;
/*
 * Mark applied to socket buffers (stored in skb->mark) to say what kind of
 * message a queued skb represents.
 */
enum rxrpc_skb_mark {
        RXRPC_SKB_MARK_DATA,            /* data message */
        RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
        RXRPC_SKB_MARK_BUSY,            /* server busy message */
        RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
        RXRPC_SKB_MARK_LOCAL_ABORT,     /* local abort message */
        RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
        RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
        RXRPC_SKB_MARK_NEW_CALL,        /* new incoming call notification */
};
/*
 * sk_state for RxRPC sockets
 */
enum {
        RXRPC_UNBOUND = 0,
        RXRPC_CLIENT_UNBOUND,           /* Unbound socket used as client */
        RXRPC_CLIENT_BOUND,             /* client local address bound */
        RXRPC_SERVER_BOUND,             /* server local address bound */
        RXRPC_SERVER_BOUND2,            /* second server local address bound */
        RXRPC_SERVER_LISTENING,         /* server listening for connections */
        RXRPC_SERVER_LISTEN_DISABLED,   /* server listening disabled */
        RXRPC_CLOSE,                    /* socket is being closed */
};
/*
 * Per-network namespace data.
 */
struct rxrpc_net {
        struct proc_dir_entry *proc_net;        /* Subdir in /proc/net */
        u32 epoch;                              /* Local epoch for detecting local-end reset */
        struct list_head calls;                 /* List of calls active in this namespace */
        rwlock_t call_lock;                     /* Lock for ->calls */
        struct list_head conn_proc_list;        /* List of conns in this namespace for proc */
        struct list_head service_conns;         /* Service conns in this namespace */
        rwlock_t conn_lock;                     /* Lock for ->conn_proc_list, ->service_conns */
        struct delayed_work service_conn_reaper; /* Reaper for idle service conns */

        /* Client connection cache (see enum rxrpc_conn_cache_state) */
        unsigned int nr_client_conns;           /* Number of counted client conns */
        unsigned int nr_active_client_conns;    /* Number on the active list */
        bool kill_all_client_conns;             /* T to discard all client conns */
        spinlock_t client_conn_cache_lock;      /* Lock for ->*_client_conns */
        spinlock_t client_conn_discard_lock;    /* Prevent multiple discarders */
        struct list_head waiting_client_conns;  /* Conns waiting for channel capacity */
        struct list_head active_client_conns;   /* Conns with calls in progress */
        struct list_head idle_client_conns;     /* Conns not currently in use */
        struct delayed_work client_conn_reaper; /* Reaper for idle client conns */

        struct list_head local_endpoints;       /* Local transport endpoints */
        struct mutex local_mutex;               /* Lock for ->local_endpoints */

        spinlock_t peer_hash_lock;              /* Lock for ->peer_hash */
        DECLARE_HASHTABLE (peer_hash, 10);      /* Remote peers, 1024 buckets */
};
/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 *
 * Each ring is empty when head == tail; head is where the next preallocated
 * object is inserted, tail is where the next one is consumed.
 */
struct rxrpc_backlog {
        unsigned short peer_backlog_head;
        unsigned short peer_backlog_tail;
        unsigned short conn_backlog_head;
        unsigned short conn_backlog_tail;
        unsigned short call_backlog_head;
        unsigned short call_backlog_tail;
#define RXRPC_BACKLOG_MAX 32            /* Ring capacity (must be a power of 2) */
        struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
        struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
        struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
};
/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
        /* WARNING: sk has to be the first member */
        struct sock sk;
        rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
        rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
        struct rxrpc_local *local;      /* local endpoint */
        struct rxrpc_backlog *backlog;  /* Preallocation for services */
        spinlock_t incoming_lock;       /* Incoming call vs service shutdown lock */
        struct list_head sock_calls;    /* List of calls owned by this socket */
        struct list_head to_be_accepted; /* calls awaiting acceptance */
        struct list_head recvmsg_q;     /* Calls awaiting recvmsg's attention */
        rwlock_t recvmsg_lock;          /* Lock for recvmsg_q */
        struct key *key;                /* security for this socket */
        struct key *securities;         /* list of server security descriptors */
        struct rb_root calls;           /* User ID -> call mapping */
        unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0          /* connect_srx is set */
        rwlock_t call_lock;             /* lock for calls */
        u32 min_sec_level;              /* minimum security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
        bool exclusive;                 /* Exclusive connection for a client socket */
        u16 second_service;             /* Additional service bound to the endpoint */
        struct {
                /* Service upgrade information */
                u16 from;               /* Service ID to upgrade (if not 0) */
                u16 to;                 /* service ID to upgrade to */
        } service_upgrade;
        sa_family_t family;             /* Protocol family created with */
        struct sockaddr_rxrpc srx;      /* Primary Service/local addresses */
        struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */
};

/* Convert a struct sock pointer to the containing rxrpc socket. */
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
        u32 epoch;              /* client boot timestamp */
        u32 cid;                /* connection and channel ID */
        u32 callNumber;         /* call ID (0 for connection-level packets) */
        u32 seq;                /* sequence number of pkt in call stream */
        u32 serial;             /* serial number of pkt sent to network */
        u8 type;                /* packet type */
        u8 flags;               /* packet flags */
        u8 userStatus;          /* app-layer defined status */
        u8 securityIndex;       /* security protocol ID */
        union {
                u16 _rsvd;      /* reserved */
                u16 cksum;      /* kerberos security checksum */
        };
        u16 serviceId;          /* service ID */
} __packed;
/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
        union {
                u8 nr_jumbo;    /* Number of jumbo subpackets */
        };
        union {
                int remain;     /* amount of space remaining for next write */
        };

        struct rxrpc_host_header hdr;   /* RxRPC packet header from this packet */
};

/* Access an skb's control buffer as RxRPC private data. */
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
/*
 * RxRPC security module interface - an ops table implemented once per
 * security protocol (e.g. rxkad).
 */
struct rxrpc_security {
        const char *name;               /* name of this service */
        u8 security_index;              /* security type provided */

        /* Initialise a security service */
        int (*init)(void);

        /* Clean up a security service */
        void (*exit)(void);

        /* initialise a connection's security */
        int (*init_connection_security)(struct rxrpc_connection *);

        /* prime a connection's packet security */
        int (*prime_packet_security)(struct rxrpc_connection *);

        /* impose security on a packet */
        int (*secure_packet)(struct rxrpc_call *,
                             struct sk_buff *,
                             size_t,
                             void *);

        /* verify the security on a received packet */
        int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
                             unsigned int, unsigned int, rxrpc_seq_t, u16);

        /* Locate the data in a received packet that has been verified. */
        void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
                            unsigned int *, unsigned int *);

        /* issue a challenge */
        int (*issue_challenge)(struct rxrpc_connection *);

        /* respond to a challenge */
        int (*respond_to_challenge)(struct rxrpc_connection *,
                                    struct sk_buff *,
                                    u32 *);

        /* verify a response */
        int (*verify_response)(struct rxrpc_connection *,
                               struct sk_buff *,
                               u32 *);

        /* clear connection security */
        void (*clear)(struct rxrpc_connection *);
};
/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
        struct rcu_head rcu;
        atomic_t usage;
        struct rxrpc_net *rxnet;        /* The network ns in which this resides */
        struct list_head link;
        struct socket *socket;          /* my UDP socket */
        struct work_struct processor;
        struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
        struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
        struct sk_buff_head reject_queue; /* packets awaiting rejection */
        struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
        struct rb_root client_conns;    /* Client connections by socket params */
        spinlock_t client_conns_lock;   /* Lock for client_conns */
        spinlock_t lock;                /* access lock */
        rwlock_t services_lock;         /* lock for services list */
        int debug_id;                   /* debug ID for printks */
        bool dead;                      /* T when endpoint has been shut down */
        struct sockaddr_rxrpc srx;      /* local address */
};
/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
        struct rcu_head rcu;            /* This must be first */
        atomic_t usage;
        unsigned long hash_key;
        struct hlist_node hash_link;    /* Link in rxnet->peer_hash */
        struct rxrpc_local *local;
        struct hlist_head error_targets; /* targets for net error distribution */
        struct work_struct error_distributor;
        struct rb_root service_conns;   /* Service connections */
        seqlock_t service_conn_lock;
        spinlock_t lock;                /* access lock */
        unsigned int if_mtu;            /* interface MTU for this peer */
        unsigned int mtu;               /* network MTU for this peer */
        unsigned int maxdata;           /* data size (MTU - hdrsize) */
        unsigned short hdrsize;         /* header size (IP + UDP + RxRPC) */
        int debug_id;                   /* debug ID for printks */
        int error_report;               /* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
        struct sockaddr_rxrpc srx;      /* remote address */

        /* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
        ktime_t rtt_last_req;           /* Time of last RTT request */
        u64 rtt;                        /* Current RTT estimate (in nS) */
        u64 rtt_sum;                    /* Sum of cache contents */
        u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
        u8 rtt_cursor;                  /* next entry at which to insert */
        u8 rtt_usage;                   /* amount of cache actually used */

        u8 cong_cwnd;                   /* Congestion window size */
};
/*
 * Keys for matching a connection.
 *
 * The epoch+cid pair can also be read as a single 64-bit index key for fast
 * comparison (endianness-dependent ordering; used only as an opaque key).
 */
struct rxrpc_conn_proto {
        union {
                struct {
                        u32 epoch;      /* epoch of this connection */
                        u32 cid;        /* connection ID */
                };
                u64 index_key;
        };
};
/* Parameters by which a client connection is set up and matched. */
struct rxrpc_conn_parameters {
        struct rxrpc_local *local;      /* Representation of local endpoint */
        struct rxrpc_peer *peer;        /* Remote endpoint */
        struct key *key;                /* Security details */
        bool exclusive;                 /* T if conn is exclusive */
        bool upgrade;                   /* T if service ID can be upgraded */
        u16 service_id;                 /* Service ID for this connection */
        u32 security_level;             /* Security level selected */
};
/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
        RXRPC_CONN_HAS_IDR,             /* Has a client conn ID assigned */
        RXRPC_CONN_IN_SERVICE_CONNS,    /* Conn is in peer->service_conns */
        RXRPC_CONN_IN_CLIENT_CONNS,     /* Conn is in local->client_conns */
        RXRPC_CONN_EXPOSED,             /* Conn has extra ref for exposure */
        RXRPC_CONN_DONT_REUSE,          /* Don't reuse this connection */
        RXRPC_CONN_COUNTED,             /* Counted by rxrpc_nr_client_conns */
        RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
};
/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
        RXRPC_CONN_EV_CHALLENGE,        /* Send challenge packet */
};
/*
 * The connection cache state.
 */
enum rxrpc_conn_cache_state {
        RXRPC_CONN_CLIENT_INACTIVE,     /* Conn is not yet listed */
        RXRPC_CONN_CLIENT_WAITING,      /* Conn is on wait list, waiting for capacity */
        RXRPC_CONN_CLIENT_ACTIVE,       /* Conn is on active list, doing calls */
        RXRPC_CONN_CLIENT_UPGRADE,      /* Conn is on active list, probing for upgrade */
        RXRPC_CONN_CLIENT_CULLED,       /* Conn is culled and delisted, doing calls */
        RXRPC_CONN_CLIENT_IDLE,         /* Conn is on idle list, doing mostly nothing */
        RXRPC_CONN__NR_CACHE_STATES
};
/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
        RXRPC_CONN_UNUSED,              /* Connection not yet attempted */
        RXRPC_CONN_CLIENT,              /* Client connection */
        RXRPC_CONN_SERVICE_PREALLOC,    /* Service connection preallocation */
        RXRPC_CONN_SERVICE_UNSECURED,   /* Service unsecured connection */
        RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
        RXRPC_CONN_SERVICE,             /* Service secured connection */
        RXRPC_CONN_REMOTELY_ABORTED,    /* Conn aborted by peer */
        RXRPC_CONN_LOCALLY_ABORTED,     /* Conn aborted locally */
        RXRPC_CONN__NR_STATES
};
/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
        struct rxrpc_conn_proto proto;
        struct rxrpc_conn_parameters params;

        atomic_t usage;
        struct rcu_head rcu;
        struct list_head cache_link;

        spinlock_t channel_lock;
        unsigned char active_chans;     /* Mask of active channels */
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
        struct list_head waiting_calls; /* Calls waiting for channels */
        /* One slot per simultaneous call on this connection */
        struct rxrpc_channel {
                struct rxrpc_call __rcu *call; /* Active call */
                u32 call_id;            /* ID of current call */
                u32 call_counter;       /* Call ID counter */
                u32 last_call;          /* ID of last call */
                u8 last_type;           /* Type of last packet */
                union {
                        u32 last_seq;
                        u32 last_abort;
                };
        } channels[RXRPC_MAXCALLS];

        struct work_struct processor;   /* connection event processor */
        union {
                struct rb_node client_node; /* Node in local->client_conns */
                struct rb_node service_node; /* Node in peer->service_conns */
        };
        struct list_head proc_link;     /* link in procfs list */
        struct list_head link;          /* link in master connection list */
        struct sk_buff_head rx_queue;   /* received conn-level packets */
        const struct rxrpc_security *security; /* applied security module */
        struct key *server_key;         /* security for this service */
        struct crypto_skcipher *cipher; /* encryption handle */
        struct rxrpc_crypt csum_iv;     /* packet checksum base */
        unsigned long flags;            /* enum rxrpc_conn_flag bits */
        unsigned long events;           /* enum rxrpc_conn_event bits */
        unsigned long idle_timestamp;   /* Time at which last became idle */
        spinlock_t state_lock;          /* state-change lock */
        enum rxrpc_conn_cache_state cache_state;
        enum rxrpc_conn_proto_state state; /* current state of connection */
        u32 local_abort;                /* local abort code */
        u32 remote_abort;               /* remote abort code */
        int debug_id;                   /* debug ID for printks */
        atomic_t serial;                /* packet serial number counter */
        unsigned int hi_serial;         /* highest serial number received */
        u32 security_nonce;             /* response re-use preventer */
        u16 service_id;                 /* Service ID, possibly upgraded */
        u8 size_align;                  /* data size alignment (for security) */
        u8 security_size;               /* security header size */
        u8 security_ix;                 /* security type */
        u8 out_clientflag;              /* RXRPC_CLIENT_INITIATED if we are client */
};
/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
        RXRPC_CALL_RELEASED,            /* call has been released - no more message to userspace */
        RXRPC_CALL_HAS_USERID,          /* has a user ID attached */
        RXRPC_CALL_IS_SERVICE,          /* Call is service call */
        RXRPC_CALL_EXPOSED,             /* The call was exposed to the world */
        RXRPC_CALL_RX_LAST,             /* Received the last packet (at rxtx_top) */
        RXRPC_CALL_TX_LAST,             /* Last packet in Tx buffer (at rxtx_top) */
        RXRPC_CALL_TX_LASTQ,            /* Last packet has been queued */
        RXRPC_CALL_SEND_PING,           /* A ping will need to be sent */
        RXRPC_CALL_PINGING,             /* Ping in process */
        RXRPC_CALL_RETRANS_TIMEOUT,     /* Retransmission due to timeout occurred */
};
/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
        RXRPC_CALL_EV_ACK,              /* need to generate ACK */
        RXRPC_CALL_EV_ABORT,            /* need to generate abort */
        RXRPC_CALL_EV_TIMER,            /* Timer expired */
        RXRPC_CALL_EV_RESEND,           /* Tx resend required */
        RXRPC_CALL_EV_PING,             /* Ping send required */
};
/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
        RXRPC_CALL_UNINITIALISED,
        RXRPC_CALL_CLIENT_AWAIT_CONN,   /* - client waiting for connection to become available */
        RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
        RXRPC_CALL_CLIENT_AWAIT_REPLY,  /* - client awaiting reply */
        RXRPC_CALL_CLIENT_RECV_REPLY,   /* - client receiving reply phase */
        RXRPC_CALL_SERVER_PREALLOC,     /* - service preallocation */
        RXRPC_CALL_SERVER_SECURING,     /* - server securing request connection */
        RXRPC_CALL_SERVER_ACCEPTING,    /* - server accepting request */
        RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
        RXRPC_CALL_SERVER_ACK_REQUEST,  /* - server pending ACK of request */
        RXRPC_CALL_SERVER_SEND_REPLY,   /* - server sending reply */
        RXRPC_CALL_SERVER_AWAIT_ACK,    /* - server awaiting final ACK */
        RXRPC_CALL_COMPLETE,            /* - call complete */
        NR__RXRPC_CALL_STATES
};
/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
        RXRPC_CALL_SLOW_START,
        RXRPC_CALL_CONGEST_AVOIDANCE,
        RXRPC_CALL_PACKET_LOSS,
        RXRPC_CALL_FAST_RETRANSMIT,
        NR__RXRPC_CONGEST_MODES
};
/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
        struct rcu_head rcu;
        struct rxrpc_connection *conn;  /* connection carrying call */
        struct rxrpc_peer *peer;        /* Peer record for remote address */
        struct rxrpc_sock __rcu *socket; /* socket responsible */
        struct mutex user_mutex;        /* User access mutex */
        ktime_t ack_at;                 /* When deferred ACK needs to happen */
        ktime_t resend_at;              /* When next resend needs to happen */
        ktime_t ping_at;                /* When next to send a ping */
        ktime_t expire_at;              /* When the call times out */
        struct timer_list timer;        /* Combined event timer */
        struct work_struct processor;   /* Event processor */
        rxrpc_notify_rx_t notify_rx;    /* kernel service Rx notification function */
        struct list_head link;          /* link in master call list */
        struct list_head chan_wait_link; /* Link in conn->waiting_calls */
        struct hlist_node error_link;   /* link in error distribution list */
        struct list_head accept_link;   /* Link in rx->acceptq */
        struct list_head recvmsg_link;  /* Link in rx->recvmsg_q */
        struct list_head sock_link;     /* Link in rx->sock_calls */
        struct rb_node sock_node;       /* Node in rx->calls */
        struct sk_buff *tx_pending;     /* Tx socket buffer being filled */
        wait_queue_head_t waitq;        /* Wait queue for channel or Tx */
        s64 tx_total_len;               /* Total length left to be transmitted (or -1) */
        __be32 crypto_buf[2];           /* Temporary packet crypto buffer */
        unsigned long user_call_ID;     /* user-defined call ID */
        unsigned long flags;            /* enum rxrpc_call_flag bits */
        unsigned long events;           /* enum rxrpc_call_event bits */
        spinlock_t lock;
        rwlock_t state_lock;            /* lock for state transition */
        u32 abort_code;                 /* Local/remote abort code */
        int error;                      /* Local error incurred */
        enum rxrpc_call_state state;    /* current state of call */
        enum rxrpc_call_completion completion; /* Call completion condition */
        atomic_t usage;
        u16 service_id;                 /* service ID */
        u8 security_ix;                 /* Security type */
        u32 call_id;                    /* call ID on connection */
        u32 cid;                        /* connection ID plus channel index */
        int debug_id;                   /* debug ID for printks */
        unsigned short rx_pkt_offset;   /* Current recvmsg packet offset */
        unsigned short rx_pkt_len;      /* Current recvmsg packet len */

        /* Rx/Tx circular buffer, depending on phase.
         *
         * In the Rx phase, packets are annotated with 0 or the number of the
         * segment of a jumbo packet each buffer refers to.  There can be up to
         * 47 segments in a maximum-size UDP packet.
         *
         * In the Tx phase, packets are annotated with which buffers have been
         * acked.
         */
#define RXRPC_RXTX_BUFF_SIZE    64
#define RXRPC_RXTX_BUFF_MASK    (RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 32
        struct sk_buff **rxtx_buffer;
        u8 *rxtx_annotations;           /* One annotation byte per rxtx_buffer slot */
#define RXRPC_TX_ANNO_ACK       0
#define RXRPC_TX_ANNO_UNACK     1
#define RXRPC_TX_ANNO_NAK       2
#define RXRPC_TX_ANNO_RETRANS   3
#define RXRPC_TX_ANNO_MASK      0x03
#define RXRPC_TX_ANNO_LAST      0x04
#define RXRPC_TX_ANNO_RESENT    0x08
#define RXRPC_RX_ANNO_JUMBO     0x3f    /* Jumbo subpacket number + 1 if not zero */
#define RXRPC_RX_ANNO_JLAST     0x40    /* Set if last element of a jumbo packet */
#define RXRPC_RX_ANNO_VERIFIED  0x80    /* Set if verified and decrypted */
        rxrpc_seq_t tx_hard_ack;        /* Dead slot in buffer; the first transmitted but
                                         * not hard-ACK'd packet follows this.
                                         */
        rxrpc_seq_t tx_top;             /* Highest Tx slot allocated. */

        /* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
         * is fixed, we keep these numbers in terms of segments (ie. DATA
         * packets) rather than bytes.
         */
#define RXRPC_TX_SMSS           RXRPC_JUMBO_DATALEN
        u8 cong_cwnd;                   /* Congestion window size */
        u8 cong_extra;                  /* Extra to send for congestion management */
        u8 cong_ssthresh;               /* Slow-start threshold */
        enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
        u8 cong_dup_acks;               /* Count of ACKs showing missing packets */
        u8 cong_cumul_acks;             /* Cumulative ACK count */
        ktime_t cong_tstamp;            /* Last time cwnd was changed */

        rxrpc_seq_t rx_hard_ack;        /* Dead slot in buffer; the first received but not
                                         * consumed packet follows this.
                                         */
        rxrpc_seq_t rx_top;             /* Highest Rx slot allocated. */
        rxrpc_seq_t rx_expect_next;     /* Expected next packet sequence number */
        u8 rx_winsize;                  /* Size of Rx window */
        u8 tx_winsize;                  /* Maximum size of Tx window */
        bool tx_phase;                  /* T if transmission phase, F if receive phase */
        u8 nr_jumbo_bad;                /* Number of jumbo dups/exceeds-windows */

        /* receive-phase ACK management */
        u8 ackr_reason;                 /* reason to ACK */
        u16 ackr_skew;                  /* skew on packet being ACK'd */
        rxrpc_serial_t ackr_serial;     /* serial of packet being ACK'd */
        rxrpc_seq_t ackr_prev_seq;      /* previous sequence number received */
        rxrpc_seq_t ackr_consumed;      /* Highest packet shown consumed */
        rxrpc_seq_t ackr_seen;          /* Highest packet shown seen */

        /* ping management */
        rxrpc_serial_t ping_serial;     /* Last ping sent */
        ktime_t ping_time;              /* Time last ping sent */

        /* transmission-phase ACK management */
        ktime_t acks_latest_ts;         /* Timestamp of latest ACK received */
        rxrpc_serial_t acks_latest;     /* serial number of latest ACK received */
        rxrpc_seq_t acks_lowest_nak;    /* Lowest NACK in the buffer (or ==tx_hard_ack) */
};
/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
        u8 ack_reason;
        u8 nr_acks;                     /* Number of ACKs in packet */
        u8 nr_nacks;                    /* Number of NACKs in packet */
        u8 nr_new_acks;                 /* Number of new ACKs in packet */
        u8 nr_new_nacks;                /* Number of new NACKs in packet */
        u8 nr_rot_new_acks;             /* Number of rotated new ACKs */
        bool new_low_nack;              /* T if new low NACK found */
        bool retrans_timeo;             /* T if reTx due to timeout happened */
        u8 flight_size;                 /* Number of unreceived transmissions */
        /* Place to stash values for tracing */
        enum rxrpc_congest_mode mode:8;
        u8 cwnd;
        u8 ssthresh;
        u8 dup_acks;
        u8 cumulative_acks;
};
#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs; /* Outstanding Tx/Rx skb counts */
extern atomic_t rxrpc_debug_id;                 /* Source of unique debug IDs */
extern struct workqueue_struct *rxrpc_workqueue; /* Used by rxrpc_queue_work() */

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
                                           struct rxrpc_connection *,
                                           struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
                                     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
                       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
                                         struct rxrpc_conn_parameters *,
                                         struct sockaddr_rxrpc *,
                                         unsigned long, s64, gfp_t);
int rxrpc_retry_client_call(struct rxrpc_sock *,
                            struct rxrpc_call *,
                            struct rxrpc_conn_parameters *,
                            struct sockaddr_rxrpc *,
                            gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
                         struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);
  632. static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
  633. {
  634. return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
  635. }
  636. static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
  637. {
  638. return !rxrpc_is_service_call(call);
  639. }
  640. /*
  641. * Transition a call to the complete state.
  642. */
  643. static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
  644. enum rxrpc_call_completion compl,
  645. u32 abort_code,
  646. int error)
  647. {
  648. if (call->state < RXRPC_CALL_COMPLETE) {
  649. call->abort_code = abort_code;
  650. call->error = error;
  651. call->completion = compl,
  652. call->state = RXRPC_CALL_COMPLETE;
  653. wake_up(&call->waitq);
  654. return true;
  655. }
  656. return false;
  657. }
  658. static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
  659. enum rxrpc_call_completion compl,
  660. u32 abort_code,
  661. int error)
  662. {
  663. bool ret;
  664. write_lock_bh(&call->state_lock);
  665. ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
  666. write_unlock_bh(&call->state_lock);
  667. return ret;
  668. }
  669. /*
  670. * Record that a call successfully completed.
  671. */
  672. static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
  673. {
  674. return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
  675. }
  676. static inline bool rxrpc_call_completed(struct rxrpc_call *call)
  677. {
  678. bool ret;
  679. write_lock_bh(&call->state_lock);
  680. ret = __rxrpc_call_completed(call);
  681. write_unlock_bh(&call->state_lock);
  682. return ret;
  683. }
  684. /*
  685. * Record that a call is locally aborted.
  686. */
  687. static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
  688. rxrpc_seq_t seq,
  689. u32 abort_code, int error)
  690. {
  691. trace_rxrpc_abort(why, call->cid, call->call_id, seq,
  692. abort_code, error);
  693. return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
  694. abort_code, error);
  695. }
  696. static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
  697. rxrpc_seq_t seq, u32 abort_code, int error)
  698. {
  699. bool ret;
  700. write_lock_bh(&call->state_lock);
  701. ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
  702. write_unlock_bh(&call->state_lock);
  703. return ret;
  704. }
/*
 * Abort a call due to a protocol error.
 *
 * Traces the error against the offending packet's serial number, then aborts
 * the call with the given abort code and -EPROTO as the local error.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

/* Wrapper that passes eproto_why through tracepoint_string() for tracing. */
#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
/*
 * conn_client.c
 */
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned int rxrpc_conn_idle_client_expiry;
extern unsigned int rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
		       struct sockaddr_rxrpc *, gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;

struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);
  758. static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
  759. {
  760. return conn->out_clientflag;
  761. }
  762. static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
  763. {
  764. return !rxrpc_conn_is_client(conn);
  765. }
  766. static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
  767. {
  768. if (!conn)
  769. return;
  770. if (rxrpc_conn_is_client(conn))
  771. rxrpc_put_client_conn(conn);
  772. else
  773. rxrpc_put_service_conn(conn);
  774. }
/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *,
				   struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
  811. static inline void rxrpc_get_local(struct rxrpc_local *local)
  812. {
  813. atomic_inc(&local->usage);
  814. }
  815. static inline
  816. struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
  817. {
  818. return atomic_inc_not_zero(&local->usage) ? local : NULL;
  819. }
  820. static inline void rxrpc_put_local(struct rxrpc_local *local)
  821. {
  822. if (local && atomic_dec_and_test(&local->usage))
  823. __rxrpc_put_local(local);
  824. }
  825. static inline void rxrpc_queue_local(struct rxrpc_local *local)
  826. {
  827. rxrpc_queue_work(&local->processor);
  828. }
/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned int rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

/* Fetch the rxrpc per-network-namespace data for @net. */
static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
					      struct rxrpc_peer *);
  874. static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
  875. {
  876. atomic_inc(&peer->usage);
  877. return peer;
  878. }
  879. static inline
  880. struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
  881. {
  882. return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
  883. }
  884. extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
  885. static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
  886. {
  887. if (peer && atomic_dec_and_test(&peer->usage))
  888. __rxrpc_put_peer(peer);
  889. }
/*
 * proc.c
 */
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
/* No-op stubs for kernels built without sysctl support. */
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *,
				struct sk_buff *);
  943. static inline bool before(u32 seq1, u32 seq2)
  944. {
  945. return (s32)(seq1 - seq2) < 0;
  946. }
  947. static inline bool before_eq(u32 seq1, u32 seq2)
  948. {
  949. return (s32)(seq1 - seq2) <= 0;
  950. }
  951. static inline bool after(u32 seq1, u32 seq2)
  952. {
  953. return (s32)(seq1 - seq2) > 0;
  954. }
  955. static inline bool after_eq(u32 seq1, u32 seq2)
  956. {
  957. return (s32)(seq1 - seq2) >= 0;
  958. }
/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

/* Unconditional debug printk, prefixed with the current task's comm name. */
#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
/* Compile-time debugging: every _enter/_leave/_debug statement is emitted. */
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
/* Runtime-selectable debugging: each class is gated by a bit in rxrpc_debug. */
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
/* Debugging disabled: no_printk() type-checks the format but emits nothing. */
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)
/* NOTE(review): assertions are currently forced on regardless of __KDEBUGALL. */

/* Assert that X is true; BUG() the kernel if not. */
#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

/* Assert that "X OP Y" holds; on failure, print both values and BUG().
 * X and Y are evaluated exactly once; Y is converted to X's type.
 */
#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

/* Assert that X is true whenever condition C is true. */
#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

/* Assert that "X OP Y" holds whenever condition C is true. */
#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else
/* Assertions compiled out: arguments are not evaluated. */
#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)
#endif /* __KDEBUGALL */