/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
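	/* Both can happen in practice (e.g. v4 traffic carried on an
	 * AF_INET6 socket via v4-mapped addresses), so each branch below
	 * keys off ee_origin rather than trusting the socket family alone.
	 */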
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
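			/* An ICMPv6 error for v4 traffic reports an
			 * IPv4-mapped IPv6 address (::ffff:a.b.c.d); the
			 * IPv4 address is the last 4 of its 16 bytes,
			 * hence the offset of 12.
			 */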
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
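		/* Routers predating RFC 1191 send Fragmentation Needed
		 * without filling in the next-hop MTU, so probe downwards:
		 * halve anything above an Ethernet-sized MTU (stopping at
		 * 1500), otherwise shave off 100 bytes but keep room for
		 * the rxrpc header.
		 */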
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
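
	/* TX timestamps are also delivered through the error queue; a
	 * zero-length skb marked SO_EE_ORIGIN_TIMESTAMPING carries no
	 * error to report.
	 */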
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
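	/* The lookup is only RCU-safe; take a real reference before leaving
	 * the read-side section, and treat a peer whose refcount has already
	 * fallen to zero as not found.
	 */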
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
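
	/* Path MTU discovery: ICMP Fragmentation Needed (type 3, code 4)
	 * carries the next-hop MTU in ee_info; adjust our record of the
	 * path rather than erroring out the calls.
	 */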
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	rxrpc_put_peer(peer);
	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;
	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

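	/* Calls record their completion error as a negative errno, hence
	 * -error below.  The list is walked under the caller's RCU read
	 * lock; the caller also holds a ref on the peer.
	 */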
	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

	spin_lock(&peer->rtt_input_lock);

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
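	/* The running sum lets the average be recomputed in O(1); the mask
	 * assumes RXRPC_RTT_CACHE_SIZE is a power of two.
	 */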
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;

	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	spin_unlock(&peer->rtt_input_lock);

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	/* Don't need to update this under lock */
	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		spin_unlock_bh(&rxnet->peer_hash_lock);

		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
		slot = keepalive_at - base;
		_debug("%02x peer %u t=%d {%pISp}",
		       cursor, peer->debug_id, slot, &peer->srx.transport);
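
		/* If the keepalive is already due (or the deadline is
		 * implausibly far ahead), ping now and park the peer a
		 * full period ahead.
		 */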
		if (keepalive_at <= base ||
		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
			rxrpc_send_keepalive(peer);
			slot = RXRPC_KEEPALIVE_TIME;
		}

		/* A transmission to this peer occurred since last we examined
		 * it so put it into the appropriate future bucket.
		 */
		slot += cursor;
		slot &= mask;
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
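	/* The (s8) cast keeps the comparison correct across u8 wrap-around,
	 * assuming the bucket array has no more than 128 slots.
	 */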
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}