@@ -478,7 +478,7 @@ static void reset_channel_cb(void *arg)
 	channel->onchannel_callback = NULL;
 }
 
-static void vmbus_close_internal(struct vmbus_channel *channel)
+static int vmbus_close_internal(struct vmbus_channel *channel)
 {
 	struct vmbus_channel_close_channel *msg;
 	int ret;
@@ -501,11 +501,28 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
 
 	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
 
-	BUG_ON(ret != 0);
+	if (ret) {
+		pr_err("Close failed: close post msg return is %d\n", ret);
+		/*
+		 * If we failed to post the close msg,
+		 * it is perhaps better to leak memory.
+		 */
+		return ret;
+	}
+
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->ringbuffer_gpadlhandle)
-		vmbus_teardown_gpadl(channel,
-				     channel->ringbuffer_gpadlhandle);
+	if (channel->ringbuffer_gpadlhandle) {
+		ret = vmbus_teardown_gpadl(channel,
+					   channel->ringbuffer_gpadlhandle);
+		if (ret) {
+			pr_err("Close failed: teardown gpadl return %d\n", ret);
+			/*
+			 * If we failed to teardown gpadl,
+			 * it is perhaps better to leak memory.
+			 */
+			return ret;
+		}
+	}
 
 	/* Cleanup the ring buffers for this channel */
 	hv_ringbuffer_cleanup(&channel->outbound);
@@ -514,7 +531,7 @@ static void vmbus_close_internal(struct vmbus_channel *channel)
 	free_pages((unsigned long)channel->ringbuffer_pages,
 		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
-
+	return ret;
 }
 
 /*
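
For reference, the error-propagation pattern this change introduces can be sketched outside the kernel tree as follows. The struct and helper names below (struct chan, post_close_msg(), teardown_gpadl(), cleanup_ring_buffers()) are simplified stand-ins for illustration, not the in-tree vmbus API; the point is that each step that can fail now logs and returns its error code, skipping the remaining teardown instead of hitting BUG_ON() or freeing state the host may still reference.

#include <stdio.h>

/* Hypothetical stand-ins for the vmbus channel state and helpers. */
struct chan {
	int gpadl_handle;	/* non-zero if a ring-buffer GPADL is mapped */
};

static int post_close_msg(struct chan *c)	 { (void)c; return 0; }
static int teardown_gpadl(struct chan *c)	 { (void)c; return 0; }
static void cleanup_ring_buffers(struct chan *c) { (void)c; }

/*
 * Mirrors the structure of the patched vmbus_close_internal():
 * the first failing step returns its error to the caller, and the
 * later cleanup is skipped on failure (deliberately leaking rather
 * than freeing memory the host side may still be using).
 */
static int close_internal(struct chan *c)
{
	int ret;

	ret = post_close_msg(c);
	if (ret) {
		fprintf(stderr, "close: post msg failed: %d\n", ret);
		return ret;
	}

	if (c->gpadl_handle) {
		ret = teardown_gpadl(c);
		if (ret) {
			fprintf(stderr, "close: teardown gpadl failed: %d\n", ret);
			return ret;
		}
	}

	cleanup_ring_buffers(c);
	return ret;
}

int main(void)
{
	struct chan c = { .gpadl_handle = 1 };

	return close_internal(&c) ? 1 : 0;
}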