@@ -1383,7 +1383,7 @@ struct uncached_list {
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
 
-static void rt_add_uncached_list(struct rtable *rt)
+void rt_add_uncached_list(struct rtable *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
 
@@ -1394,14 +1394,8 @@ static void rt_add_uncached_list(struct rtable *rt)
 	spin_unlock_bh(&ul->lock);
 }
 
-static void ipv4_dst_destroy(struct dst_entry *dst)
+void rt_del_uncached_list(struct rtable *rt)
 {
-	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
-	struct rtable *rt = (struct rtable *) dst;
-
-	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
-		kfree(p);
-
 	if (!list_empty(&rt->rt_uncached)) {
 		struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1411,6 +1405,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 	}
 }
 
+static void ipv4_dst_destroy(struct dst_entry *dst)
+{
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+	struct rtable *rt = (struct rtable *)dst;
+
+	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+		kfree(p);
+
+	rt_del_uncached_list(rt);
+}
+
 void rt_flush_dev(struct net_device *dev)
 {
 	struct net *net = dev_net(dev);