@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
 		atomic_set(&local->usage, 1);
+		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
 		INIT_LIST_HEAD(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 		 * bind the transport socket may still fail if we're attempting
 		 * to use a local address that the dying object is still using.
 		 */
-		if (!rxrpc_get_local_maybe(local)) {
-			cursor = cursor->next;
-			list_del_init(&local->link);
+		if (!rxrpc_use_local(local))
 			break;
-		}
 
 		age = "old";
 		goto found;
@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	list_add_tail(&local->link, cursor);
+	if (cursor != &rxnet->local_endpoints)
+		list_replace(cursor, &local->link);
+	else
+		list_add_tail(&local->link, cursor);
 	age = "new";
 
 found:
@@ -346,7 +347,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }
 
 /*
- * Queue a local endpoint.
+ * Queue a local endpoint unless it has become unreferenced and pass the
+ * caller's reference to the work item.
 */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
@@ -355,15 +357,8 @@ void rxrpc_queue_local(struct rxrpc_local *local)
 	if (rxrpc_queue_work(&local->processor))
 		trace_rxrpc_local(local, rxrpc_local_queued,
 				  atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-	_enter("%d", local->debug_id);
-	rxrpc_queue_work(&local->processor);
+	else
+		rxrpc_put_local(local);
 }
 
 /*
@@ -379,10 +374,45 @@ void rxrpc_put_local(struct rxrpc_local *local)
 		trace_rxrpc_local(local, rxrpc_local_put, n, here);
 
 		if (n == 0)
-			__rxrpc_put_local(local);
+			call_rcu(&local->rcu, rxrpc_local_rcu);
 	}
 }
 
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	local = rxrpc_get_local_maybe(local);
+	if (!local)
+		return NULL;
+
+	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+	if (au == 0) {
+		rxrpc_put_local(local);
+		return NULL;
+	}
+
+	return local;
+}
+
+/*
+ * Cease using a local endpoint.  Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	au = atomic_dec_return(&local->active_users);
+	if (au == 0)
+		rxrpc_queue_local(local);
+	else
+		rxrpc_put_local(local);
+}
+
 /*
  * Destroy a local endpoint's socket and then hand the record to RCU to dispose
  * of.
@@ -397,16 +427,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
 	_enter("%d", local->debug_id);
 
-	/* We can get a race between an incoming call packet queueing the
-	 * processor again and the work processor starting the destruction
-	 * process which will shut down the UDP socket.
-	 */
-	if (local->dead) {
-		_leave(" [already dead]");
-		return;
-	}
-	local->dead = true;
-
 	mutex_lock(&rxnet->local_mutex);
 	list_del_init(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
@@ -426,13 +446,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	 */
 	rxrpc_purge_queue(&local->reject_queue);
 	rxrpc_purge_queue(&local->event_queue);
-
-	_debug("rcu local %d", local->debug_id);
-	call_rcu(&local->rcu, rxrpc_local_rcu);
 }
 
 /*
- * Process events on an endpoint
+ * Process events on an endpoint.  The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -445,8 +463,10 @@ static void rxrpc_local_processor(struct work_struct *work)
 
 	do {
 		again = false;
-		if (atomic_read(&local->usage) == 0)
-			return rxrpc_local_destroyer(local);
+		if (atomic_read(&local->active_users) == 0) {
+			rxrpc_local_destroyer(local);
+			break;
+		}
 
 		if (!skb_queue_empty(&local->reject_queue)) {
 			rxrpc_reject_packets(local);
@@ -458,6 +478,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 			again = true;
 		}
 	} while (again);
+
+	rxrpc_put_local(local);
 }
 
 /*
|