@@ -161,6 +161,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 
 int __xfrm_state_delete(struct xfrm_state *x);
 
 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
+bool km_is_alive(const struct km_event *c);
 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 
 static DEFINE_SPINLOCK(xfrm_type_lock);
@@ -788,6 +789,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 	struct xfrm_state *best = NULL;
 	u32 mark = pol->mark.v & pol->mark.m;
 	unsigned short encap_family = tmpl->encap_family;
+	struct km_event c;
 
 	to_put = NULL;
 
@@ -832,6 +834,17 @@ found:
 			error = -EEXIST;
 			goto out;
 		}
+
+		c.net = net;
+		/* If the KMs have no listeners (yet...), avoid allocating an SA
+		 * for each and every packet - garbage collection might not
+		 * handle the flood.
+		 */
+		if (!km_is_alive(&c)) {
+			error = -ESRCH;
+			goto out;
+		}
+
 		x = xfrm_state_alloc(net);
 		if (x == NULL) {
 			error = -ENOMEM;
@@ -1793,6 +1806,24 @@ int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address
 }
 EXPORT_SYMBOL(km_report);
 
+bool km_is_alive(const struct km_event *c)
+{
+	struct xfrm_mgr *km;
+	bool is_alive = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
+		if (km->is_alive && km->is_alive(c)) {
+			is_alive = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return is_alive;
+}
+EXPORT_SYMBOL(km_is_alive);
+
 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
 {
 	int err;