@@ -1190,6 +1190,16 @@ out:
 	return NULL;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1319,6 +1329,7 @@ ipv6_pktoptions:
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
 			skb_set_owner_r(opt_skb, sk);
+			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {
 			__kfree_skb(opt_skb);
@@ -1352,15 +1363,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 	TCP_SKB_CB(skb)->sacked = 0;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-	/* We need to move header back to the beginning if xfrm6_policy_check()
-	 * and tcp_v6_fill_cb() are going to be called again.
-	 */
-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-		sizeof(struct inet6_skb_parm));
-}
-
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
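
Note on the mechanics: tcp_v6_fill_cb() parks the IPv6 control block from the
front of skb->cb into the header.h6 member of struct tcp_skb_cb so TCP state
can reuse that space, and tcp_v6_restore_cb() moves it back for consumers such
as ip6_datagram_recv_specific_ctl() that expect IP6CB() at the start of
skb->cb. Below is a standalone userspace sketch of that save/restore aliasing
(not part of the patch; sk_buff, tcp_skb_cb and inet6_skb_parm here are
simplified stand-ins, not the kernel's real layouts):

#include <stdio.h>
#include <string.h>

struct inet6_skb_parm {
	int iif;			/* input interface index */
};

struct sk_buff {
	char cb[48];			/* per-layer scratch area, as in the kernel */
};

struct tcp_skb_cb {
	unsigned int seq;		/* TCP state claims the front of cb[] */
	unsigned int end_seq;
	struct inet6_skb_parm h6;	/* saved IP6CB parked behind it */
};

#define IP6CB(skb)	((struct inet6_skb_parm *)((skb)->cb))
#define TCP_SKB_CB(skb)	((struct tcp_skb_cb *)((skb)->cb))

/* Save IP6CB out of the way before TCP state overwrites cb[]. */
static void fill_cb(struct sk_buff *skb)
{
	/* memmove, not memcpy: source and destination share cb[]. */
	memmove(&TCP_SKB_CB(skb)->h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	TCP_SKB_CB(skb)->seq = 1;	/* clobbers the old IP6CB location */
	TCP_SKB_CB(skb)->end_seq = 2;
}

/* Move IP6CB back to the start of cb[], as tcp_v6_restore_cb() does. */
static void restore_cb(struct sk_buff *skb)
{
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->h6,
		sizeof(struct inet6_skb_parm));
}

int main(void)
{
	struct sk_buff skb = { { 0 } };

	IP6CB(&skb)->iif = 7;		/* IPv6 layer records the ingress device */
	fill_cb(&skb);			/* TCP takes over cb[] */
	restore_cb(&skb);		/* hand cb[] back to IPv6 consumers */
	printf("iif=%d\n", IP6CB(&skb)->iif);	/* prints iif=7 */
	return 0;
}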