@@ -73,6 +73,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	unsigned long flags;
 	int ret, err = 0;
 	unsigned long t;
+	struct page *page;
 
 	spin_lock_irqsave(&newchannel->lock, flags);
 	if (newchannel->state == CHANNEL_OPEN_STATE) {
@@ -87,8 +88,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	newchannel->channel_callback_context = context;
 
 	/* Allocate the ring buffer */
-	out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-		get_order(send_ringbuffer_size + recv_ringbuffer_size));
+	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+				GFP_KERNEL|__GFP_ZERO,
+				get_order(send_ringbuffer_size +
+					  recv_ringbuffer_size));
+
+	if (!page)
+		out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+					       get_order(send_ringbuffer_size +
+					       recv_ringbuffer_size));
+	else
+		out = (void *)page_address(page);
 
 	if (!out) {
 		err = -ENOMEM;
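
The hunk above follows a common NUMA-aware allocation pattern: try to get the ring-buffer pages from the node that backs the channel's target CPU, and only fall back to an unconstrained allocation when that node cannot satisfy the request. A minimal sketch of the same pattern outside the vmbus code (the helper name and parameters below are illustrative only, not part of the patch):

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/topology.h>

	/*
	 * Illustrative helper (not part of the patch): allocate 'size' bytes
	 * of zeroed, physically contiguous memory, preferring the NUMA node
	 * that hosts 'cpu', and fall back to any node if that one is exhausted.
	 */
	static void *alloc_cpu_local_buffer(int cpu, size_t size)
	{
		unsigned int order = get_order(size);
		struct page *page;

		/* First try: pages from the node backing the target CPU. */
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_ZERO, order);
		if (page)
			return page_address(page);

		/* Fallback: let the allocator pick any node. */
		return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	}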