/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;

/*
 * Mark applied to socket buffers.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_DATA,		/* data message */
	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK received message */
	RXRPC_SKB_MARK_BUSY,		/* server busy message */
	RXRPC_SKB_MARK_REMOTE_ABORT,	/* remote abort message */
	RXRPC_SKB_MARK_LOCAL_ABORT,	/* local abort message */
	RXRPC_SKB_MARK_NET_ERROR,	/* network error message */
	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error message */
	RXRPC_SKB_MARK_NEW_CALL,	/* new incoming call notification */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
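
/* Illustrative sketch (not from this header): the backlog arrays are treated
 * as rings indexed by the head and tail counters, with the preallocator
 * filling the head end and the data_ready path consuming from the tail end.
 * A producer step would look roughly like:
 *
 *	unsigned short head = b->call_backlog_head;
 *	unsigned short tail = READ_ONCE(b->call_backlog_tail);
 *
 *	if (CIRC_CNT(head, tail, RXRPC_BACKLOG_MAX) < RXRPC_BACKLOG_MAX - 1) {
 *		b->call_backlog[head % RXRPC_BACKLOG_MAX] = call;
 *		smp_store_release(&b->call_backlog_head, head + 1);
 *	}
 *
 * The exact barriers and helpers used here are an assumption; the real
 * producer/consumer code lives in call_accept.c.
 */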

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED	0		/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;
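
/* Illustrative sketch (not part of this header): the on-the-wire header from
 * <rxrpc/packet.h> (struct rxrpc_wire_header) carries the same fields in
 * network byte order, and the receive path converts it into this CPU-order
 * form roughly as follows.  The field-for-field correspondence shown here is
 * an assumption; the real conversion lives in input.c.
 *
 *	const struct rxrpc_wire_header *whdr = (const void *)skb->data;
 *	struct rxrpc_host_header hdr;
 *
 *	hdr.epoch	= ntohl(whdr->epoch);
 *	hdr.cid		= ntohl(whdr->cid);
 *	hdr.callNumber	= ntohl(whdr->callNumber);
 *	hdr.seq		= ntohl(whdr->seq);
 *	hdr.serial	= ntohl(whdr->serial);
 *	hdr.serviceId	= ntohs(whdr->serviceId);
 */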

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	union {
		u8		nr_jumbo;	/* Number of jumbo subpackets */
	};
	union {
		int		remain;		/* amount of space remaining for next write */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
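
/* Illustrative check (an assumption, not in the original source): the private
 * data must fit inside sk_buff::cb, which is what the "max 48 bytes" note
 * above refers to.  A compile-time guard would look like:
 *
 *	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */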

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data in a received packet that has been verified. */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		usage;
	struct list_head	link;
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct rb_root		client_conns;	/* Client connections by socket params */
	spinlock_t		client_conns_lock; /* Lock for client_conns */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct work_struct	error_distributor;
	struct rb_root		service_conns;	/* Service connections */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			error_report;	/* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	u64			rtt;		/* Current RTT estimate (in nS) */
	u64			rtt_sum;	/* Sum of cache contents */
	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
	u8			rtt_cursor;	/* next entry at which to insert */
	u8			rtt_usage;	/* amount of cache actually used */
};
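
/* Illustrative sketch (an assumption about peer_event.c, not part of this
 * header): the RTT fields above behave as a sliding-window average.  A new
 * sample displaces the entry under rtt_cursor and the estimate is recomputed
 * from the running sum, along these lines:
 *
 *	u64 sample = ktime_to_ns(ktime_sub(resp_time, send_time));
 *
 *	peer->rtt_sum += sample - peer->rtt_cache[peer->rtt_cursor];
 *	peer->rtt_cache[peer->rtt_cursor] = sample;
 *	peer->rtt_cursor = (peer->rtt_cursor + 1) % RXRPC_RTT_CACHE_SIZE;
 *	if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
 *		peer->rtt_usage++;
 *	peer->rtt = div64_u64(peer->rtt_sum, peer->rtt_usage);
 */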

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64	index_key;
	};
};
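
/* Note (interpretation, not from the original comments): index_key overlays
 * epoch and cid so that both halves of the match key can be compared and
 * ordered as a single u64 when walking the connection rb-trees, e.g.:
 *
 *	struct rxrpc_conn_proto k;
 *
 *	k.epoch	= sp->hdr.epoch;
 *	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
 *	...
 *	if (k.index_key < conn->proto.index_key)
 *		p = p->rb_left;
 *
 * The layout of the combined key follows host endianness; only equality and
 * ordering within one host matter, never the wire format.
 */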

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* Conn is in local->client_conns */
	RXRPC_CONN_EXPOSED,		/* Conn has extra ref for exposure */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
};

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection cache state.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* Conn is not yet listed */
	RXRPC_CONN_CLIENT_WAITING,	/* Conn is on wait list, waiting for capacity */
	RXRPC_CONN_CLIENT_ACTIVE,	/* Conn is on active list, doing calls */
	RXRPC_CONN_CLIENT_CULLED,	/* Conn is culled and delisted, doing calls */
	RXRPC_CONN_CLIENT_IDLE,		/* Conn is on idle list, doing mostly nothing */
	RXRPC_CONN__NR_CACHE_STATES
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;

	atomic_t		usage;
	struct rcu_head		rcu;
	struct list_head	cache_link;

	spinlock_t		channel_lock;
	unsigned char		active_chans;	/* Mask of active channels */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	struct rxrpc_channel {
		struct rxrpc_call __rcu	*call;		/* Active call */
		u32			call_id;	/* ID of current call */
		u32			call_counter;	/* Call ID counter */
		u32			last_call;	/* ID of last call */
		u8			last_type;	/* Type of last packet */
		u16			last_service_id;
		union {
			u32		last_seq;
			u32		last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct work_struct	processor;	/* connection event processor */
	union {
		struct rb_node	client_node;	/* Node in local->client_conns */
		struct rb_node	service_node;	/* Node in peer->service_conns */
	};
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_skcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_cache_state cache_state;
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* response re-use preventer */
	u8			size_align;	/* data size alignment (for security) */
	u8			security_size;	/* security header size */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};
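
/* Worked note (derived, not in the original source): RXRPC_MAXCALLS is 4, so
 * RXRPC_ACTIVE_CHANS_MASK evaluates to (1 << 4) - 1 == 0x0f, one bit per
 * channel.  A free channel could be found with something like:
 *
 *	unsigned int chan = ffs(~conn->active_chans & RXRPC_ACTIVE_CHANS_MASK);
 *
 *	if (chan)
 *		chan--;		(ffs() is 1-based; 0 means all four are busy)
 *
 * A call placed on channel N is then identified on the wire by
 * (conn->proto.cid | N), since the bottom bits of the connection ID select
 * the channel.
 */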

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_PINGING,		/* Ping in process */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_TIMER,		/* Timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* Ping send required */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call completion condition (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	ktime_t			ack_at;		/* When deferred ACK needs to happen */
	ktime_t			resend_at;	/* When next resend needs to happen */
	ktime_t			ping_at;	/* When next to send a ping */
	ktime_t			expire_at;	/* When the call times out */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	processor;	/* Event processor */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* Link in conn->waiting_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->acceptq */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	__be32			crypto_buf[2];	/* Temporary packet crypto buffer */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	u32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state;		/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	atomic_t		usage;
	u16			service_id;	/* service ID */
	u8			security_ix;	/* Security type */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with 0 or the number of the
	 * segment of a jumbo packet each buffer refers to.  There can be up to
	 * 47 segments in a maximum-size UDP packet.
	 *
	 * In the Tx phase, packets are annotated with which buffers have been
	 * acked.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 32
	struct sk_buff		**rxtx_buffer;
	u8			*rxtx_annotations;
#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08
#define RXRPC_RX_ANNO_JUMBO	0x3f		/* Jumbo subpacket number + 1 if not zero */
#define RXRPC_RX_ANNO_JLAST	0x40		/* Set if last element of a jumbo packet */
#define RXRPC_RX_ANNO_VERIFIED	0x80		/* Set if verified and decrypted */
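	/* Illustrative note (derived, not in the original source): a packet
	 * with sequence number seq occupies ring slot seq & RXRPC_RXTX_BUFF_MASK,
	 * so the window between the hard-ACK point and the top slot can never
	 * exceed RXRPC_RXTX_BUFF_SIZE - 1 entries.  A typical access is:
	 *
	 *	int ix = seq & RXRPC_RXTX_BUFF_MASK;
	 *	struct sk_buff *skb = call->rxtx_buffer[ix];
	 *	u8 annotation = call->rxtx_annotations[ix];
	 */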
	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
						 * not hard-ACK'd packet follows this.
						 */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */

	rxrpc_seq_t		rx_hard_ack;	/* Dead slot in buffer; the first received but not
						 * consumed packet follows this.
						 */
	rxrpc_seq_t		rx_top;		/* Highest Rx slot allocated. */
	rxrpc_seq_t		rx_expect_next;	/* Expected next packet sequence number */
	u8			rx_winsize;	/* Size of Rx window */
	u8			tx_winsize;	/* Maximum size of Tx window */
	bool			tx_phase;	/* T if transmission phase, F if receive phase */
	u8			nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */

	/* receive-phase ACK management */
	u8			ackr_reason;	/* reason to ACK */
	u16			ackr_skew;	/* skew on packet being ACK'd */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */

	/* ping management */
	rxrpc_serial_t		ping_serial;	/* Last ping sent */
	ktime_t			ping_time;	/* Time last ping sent */

	/* transmission-phase ACK management */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
};

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;
	u8			nr_acks;	/* Number of ACKs in packet */
	u8			nr_nacks;	/* Number of NACKs in packet */
	u8			nr_new_acks;	/* Number of new ACKs in packet */
	u8			nr_new_nacks;	/* Number of new NACKs in packet */
	u8			nr_rot_new_acks; /* Number of rotated new ACKs */
	bool			new_low_nack;	/* T if new low NACK found */
	bool			retrans_timeo;	/* T if reTx due to timeout happened */
	u8			flight_size;	/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};

#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_connection *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
				     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 unsigned long, gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * Transition a call to the complete state.
 */
static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
					       enum rxrpc_call_completion compl,
					       u32 abort_code,
					       int error)
{
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->error = error;
		call->completion = compl;
		call->state = RXRPC_CALL_COMPLETE;
		wake_up(&call->waitq);
		return true;
	}
	return false;
}

static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
					     enum rxrpc_call_completion compl,
					     u32 abort_code,
					     int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call successfully completed.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

static inline bool rxrpc_call_completed(struct rxrpc_call *call)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_call_completed(call);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call is locally aborted.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}

static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				    rxrpc_seq_t seq, u32 abort_code, int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}
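
/* Hypothetical usage sketch (the "why" string and values are illustrative,
 * not taken from a real call site): a protocol error seen while parsing
 * packet seq of a call would typically be recorded and then turned into an
 * ABORT packet by the caller, e.g.:
 *
 *	if (rxrpc_abort_call("prs", call, seq, RX_PROTOCOL_ERROR, -EPROTO))
 *		rxrpc_send_abort_packet(call);
 *
 * The double-underscore variants assume call->state_lock is already held;
 * the plain variants take it themselves.
 */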

/*
 * conn_client.c
 */
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned int rxrpc_conn_idle_client_expiry;
extern unsigned int rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
		       struct sockaddr_rxrpc *, gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_client_connections(void);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern struct list_head rxrpc_connection_proc_list;
extern rwlock_t rxrpc_connection_lock;

int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);

static inline void rxrpc_get_local(struct rxrpc_local *local)
{
	atomic_inc(&local->usage);
}

static inline
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	return atomic_inc_not_zero(&local->usage) ? local : NULL;
}

static inline void rxrpc_put_local(struct rxrpc_local *local)
{
	if (local && atomic_dec_and_test(&local->usage))
		__rxrpc_put_local(local);
}

static inline void rxrpc_queue_local(struct rxrpc_local *local)
{
	rxrpc_queue_work(&local->processor);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned int rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
					      struct rxrpc_peer *);

static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
	atomic_inc(&peer->usage);
	return peer;
}

static inline
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
}

extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	if (peer && atomic_dec_and_test(&peer->usage))
		__rxrpc_put_peer(peer);
}
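
/* Usage note (interpretation, not from the original comments): the *_maybe
 * getters exist for RCU lookups, where the object may already be on its way
 * to destruction.  The common pattern is:
 *
 *	rcu_read_lock();
 *	peer = rxrpc_lookup_peer_rcu(local, &srx);
 *	if (peer)
 *		peer = rxrpc_get_peer_maybe(peer);	(NULL if usage was 0)
 *	rcu_read_unlock();
 *
 * whereas rxrpc_get_peer() may only be used on a pointer whose reference is
 * already guaranteed to be held.
 */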

/*
 * proc.c
 */
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
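
/* Worked example (derived, not in the original source): these helpers give
 * serial-number arithmetic, so comparisons stay correct across u32 wrap.
 * With seq1 = 0xfffffffe and seq2 = 0x00000001:
 *
 *	seq1 - seq2 = 0xfffffffd, which as s32 is -3, so before(seq1, seq2)
 *	is true - 0xfffffffe is treated as three behind 1, not four billion
 *	ahead of it.
 *
 * The comparison is only meaningful while the two values are within 2^31 of
 * each other.
 */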

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */