@@ -428,7 +428,6 @@ void vmbus_free_channels(void)
 {
         struct vmbus_channel *channel, *tmp;
 
-        mutex_lock(&vmbus_connection.channel_mutex);
         list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
                 listentry) {
                 /* hv_process_channel_removal() needs this */
@@ -436,7 +435,6 @@ void vmbus_free_channels(void)
 
                 vmbus_device_unregister(channel->device_obj);
         }
-        mutex_unlock(&vmbus_connection.channel_mutex);
 }
 
 /*
@@ -483,8 +481,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                         list_add_tail(&newchannel->sc_list, &channel->sc_list);
                         channel->num_sc++;
                         spin_unlock_irqrestore(&channel->lock, flags);
-                } else
+                } else {
+                        atomic_dec(&vmbus_connection.offer_in_progress);
                         goto err_free_chan;
+                }
         }
 
         dev_type = hv_get_dev_type(newchannel);
@@ -511,6 +511,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
         if (!fnew) {
                 if (channel->sc_creation_callback != NULL)
                         channel->sc_creation_callback(newchannel);
+                atomic_dec(&vmbus_connection.offer_in_progress);
                 return;
         }
 
@@ -532,9 +533,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
          * binding which eventually invokes the device driver's AddDevice()
          * method.
          */
-        mutex_lock(&vmbus_connection.channel_mutex);
         ret = vmbus_device_register(newchannel->device_obj);
-        mutex_unlock(&vmbus_connection.channel_mutex);
 
         if (ret != 0) {
                 pr_err("unable to add child device object (relid %d)\n",
@@ -542,6 +541,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
                 kfree(newchannel->device_obj);
                 goto err_deq_chan;
         }
+
+        atomic_dec(&vmbus_connection.offer_in_progress);
         return;
 
 err_deq_chan:
@@ -797,6 +798,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
         newchannel = alloc_channel();
         if (!newchannel) {
                 vmbus_release_relid(offer->child_relid);
+                atomic_dec(&vmbus_connection.offer_in_progress);
                 pr_err("Unable to allocate channel object\n");
                 return;
         }
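Taken together, the hunks above make every exit path of the offer-handling code drop vmbus_connection.offer_in_progress: the sub-channel error path, the existing-channel early return, the device-registration failure path, the normal return, and the allocation-failure path in vmbus_onoffer(). The increment side is not part of these hunks; presumably the message-dispatch path raises the count before the offer work runs. The userspace sketch below (hypothetical names, C11 atomics, not kernel code) only illustrates that protocol.

/* Illustrative userspace sketch; not part of this patch. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int offer_in_progress;            /* mirrors vmbus_connection.offer_in_progress */

/* Hypothetical dispatcher: raises the count before the handler runs. */
static void dispatch_offer(void (*handler)(int relid), int relid)
{
        atomic_fetch_add(&offer_in_progress, 1);
        handler(relid);
}

/* Hypothetical handler: every exit path, success or error, drops the count. */
static void process_offer(int relid)
{
        if (relid < 0) {                        /* error path */
                atomic_fetch_sub(&offer_in_progress, 1);
                return;
        }
        /* ... make the channel discoverable ... */
        atomic_fetch_sub(&offer_in_progress, 1);        /* success path */
}

int main(void)
{
        dispatch_offer(process_offer, 1);
        dispatch_offer(process_offer, -1);
        printf("offers in progress: %d\n", atomic_load(&offer_in_progress)); /* prints 0 */
        return 0;
}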
@@ -843,16 +845,38 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 
         rescind = (struct vmbus_channel_rescind_offer *)hdr;
 
+        /*
+         * The offer msg and the corresponding rescind msg
+         * from the host are guaranteed to be ordered -
+         * offer comes in first and then the rescind.
+         * Since we process these events in work elements,
+         * and with preemption, we may end up processing
+         * the events out of order. Given that we handle these
+         * work elements on the same CPU, this is possible only
+         * in the case of preemption. In any case wait here
+         * until the offer processing has moved beyond the
+         * point where the channel is discoverable.
+         */
+
+        while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
+                /*
+                 * Wait until any channel offer that is currently
+                 * being processed has completed.
+                 */
+                msleep(1);
+        }
+
         mutex_lock(&vmbus_connection.channel_mutex);
         channel = relid2channel(rescind->child_relid);
+        mutex_unlock(&vmbus_connection.channel_mutex);
 
         if (channel == NULL) {
                 /*
-                 * This is very impossible, because in
-                 * vmbus_process_offer(), we have already invoked
-                 * vmbus_release_relid() on error.
+                 * We failed in processing the offer message;
+                 * we would have cleaned up the relid in that
+                 * failure path.
                  */
-                goto out;
+                return;
         }
 
         spin_lock_irqsave(&channel->lock, flags);
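The rescind side of the same protocol is the busy-wait added above: vmbus_onoffer_rescind() polls offer_in_progress with msleep(1) until no offer is being set up, and only then looks the channel up via relid2channel() while briefly holding channel_mutex. A minimal userspace analogue of that wait, using pthreads and C11 atomics purely for illustration (not kernel code), is sketched below.

/* Illustrative userspace analogue of the poll-and-sleep wait. Build with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int offer_in_progress = 1;        /* an offer is already being processed */

static void *offer_worker(void *arg)
{
        (void)arg;
        usleep(5000);                           /* pretend to set up the channel */
        atomic_fetch_sub(&offer_in_progress, 1);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, offer_worker, NULL);

        /* mirrors: while (atomic_read(&vmbus_connection.offer_in_progress) != 0) msleep(1); */
        while (atomic_load(&offer_in_progress) != 0)
                usleep(1000);

        puts("offer processing done; channel lookup is now safe");
        pthread_join(worker, NULL);
        return 0;
}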
@@ -864,7 +888,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
         if (channel->device_obj) {
                 if (channel->chn_rescind_callback) {
                         channel->chn_rescind_callback(channel);
-                        goto out;
+                        return;
                 }
                 /*
                  * We will have to unregister this device from the
@@ -875,13 +899,26 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
                         vmbus_device_unregister(channel->device_obj);
                         put_device(dev);
                 }
-        } else {
-                hv_process_channel_removal(channel,
-                        channel->offermsg.child_relid);
         }
-
-out:
-        mutex_unlock(&vmbus_connection.channel_mutex);
+        if (channel->primary_channel != NULL) {
+                /*
+                 * Sub-channel is being rescinded. Following is the channel
+                 * close sequence when initiated from the driver (refer to
+                 * vmbus_close() for details):
+                 * 1. Close all sub-channels first
+                 * 2. Then close the primary channel.
+                 */
+                if (channel->state == CHANNEL_OPEN_STATE) {
+                        /*
+                         * The channel is currently not open;
+                         * it is safe for us to clean up the channel.
+                         */
+                        mutex_lock(&vmbus_connection.channel_mutex);
+                        hv_process_channel_removal(channel,
+                                                   channel->offermsg.child_relid);
+                        mutex_unlock(&vmbus_connection.channel_mutex);
+                }
+        }
 }
 
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
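After this last hunk, the rescind handler calls hv_process_channel_removal() only for a sub-channel (primary_channel != NULL) that is still in CHANNEL_OPEN_STATE, and it takes channel_mutex just around that call; the old unconditional else branch is gone. The sketch below, using stand-in fields rather than the real struct vmbus_channel, restates only that branch structure.

/* Stand-in types; a sketch of the control flow above, not the real driver code. */
#include <stdbool.h>
#include <stdio.h>

struct chan {
        bool has_device_obj;    /* channel->device_obj != NULL */
        bool has_rescind_cb;    /* channel->chn_rescind_callback != NULL */
        bool is_subchannel;     /* channel->primary_channel != NULL */
        bool never_opened;      /* channel->state == CHANNEL_OPEN_STATE (not yet opened) */
};

static const char *rescind_action(const struct chan *c)
{
        if (c->has_device_obj) {
                if (c->has_rescind_cb)
                        return "invoke the rescind callback and return";
                /* otherwise the child device is unregistered, then fall through */
        }
        if (c->is_subchannel && c->never_opened)
                return "hv_process_channel_removal() under channel_mutex";
        return "no removal in this handler";
}

int main(void)
{
        struct chan sub = { .is_subchannel = true, .never_opened = true };

        puts(rescind_action(&sub));
        return 0;
}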