@@ -796,13 +796,31 @@ static unsigned long addr_to_vb_idx(unsigned long addr)
 	return addr;
 }
 
-static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
+{
+	unsigned long addr;
+
+	addr = va_start + (pages_off << PAGE_SHIFT);
+	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
+	return (void *)addr;
+}
+
+/**
+ * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
+ *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
+ * @order:    how many 2^order pages should be occupied in newly allocated block
+ * @gfp_mask: flags for the page level allocator
+ *
+ * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ */
+static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
 	struct vmap_area *va;
 	unsigned long vb_idx;
 	int node, err;
+	void *vaddr;
 
 	node = numa_node_id();
 
@@ -826,9 +844,12 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 		return ERR_PTR(err);
 	}
 
+	vaddr = vmap_block_vaddr(va->va_start, 0);
 	spin_lock_init(&vb->lock);
 	vb->va = va;
-	vb->free = VMAP_BBMAP_BITS;
+	/* At least something should be left free */
+	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
+	vb->free = VMAP_BBMAP_BITS - (1UL << order);
 	vb->dirty = 0;
 	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 	INIT_LIST_HEAD(&vb->free_list);
@@ -846,7 +867,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	spin_unlock(&vbq->lock);
 	put_cpu_var(vmap_block_queue);
 
-	return vb;
+	return vaddr;
 }
 
 static void free_vmap_block(struct vmap_block *vb)
@@ -910,7 +931,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
-	unsigned long addr = 0;
+	void *vaddr = NULL;
 	unsigned int order;
 
 	BUG_ON(size & ~PAGE_MASK);
@@ -925,43 +946,38 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	}
 	order = get_order(size);
 
-again:
 	rcu_read_lock();
 	vbq = &get_cpu_var(vmap_block_queue);
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-		int i;
+		unsigned long pages_off;
 
 		spin_lock(&vb->lock);
-		if (vb->free < 1UL << order)
-			goto next;
+		if (vb->free < (1UL << order)) {
+			spin_unlock(&vb->lock);
+			continue;
+		}
 
-		i = VMAP_BBMAP_BITS - vb->free;
-		addr = vb->va->va_start + (i << PAGE_SHIFT);
-		BUG_ON(addr_to_vb_idx(addr) !=
-				addr_to_vb_idx(vb->va->va_start));
+		pages_off = VMAP_BBMAP_BITS - vb->free;
+		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
 		vb->free -= 1UL << order;
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
 			list_del_rcu(&vb->free_list);
 			spin_unlock(&vbq->lock);
 		}
+
 		spin_unlock(&vb->lock);
 		break;
-next:
-		spin_unlock(&vb->lock);
 	}
 
 	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
-	if (!addr) {
-		vb = new_vmap_block(gfp_mask);
-		if (IS_ERR(vb))
-			return vb;
-		goto again;
-	}
+	/* Allocate new block if nothing was found */
+	if (!vaddr)
+		vaddr = new_vmap_block(order, gfp_mask);
 
-	return (void *)addr;
+	return vaddr;
 }
 
 static void vb_free(const void *addr, unsigned long size)
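
As a standalone aside, not part of the patch: the sketch below mimics the arithmetic behind vmap_block_vaddr() and the way vb_alloc() derives the next offset from vb->free. PAGE_SHIFT, VMAP_BBMAP_BITS and the simplified block-index helper are assumed placeholder values for the example; in the kernel they come from the architecture headers and from mm/vmalloc.c.

/*
 * Userspace sketch of the vmap_block_vaddr() arithmetic, with assumed
 * placeholder constants; not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed: 4 KiB pages */
#define VMAP_BBMAP_BITS	1024UL		/* assumed: pages per vmap block */
#define VMAP_BLOCK_SIZE	(VMAP_BBMAP_BITS << PAGE_SHIFT)

/* Simplified stand-in for addr_to_vb_idx(): which block owns this address? */
static unsigned long block_idx(unsigned long addr)
{
	return addr / VMAP_BLOCK_SIZE;
}

/*
 * Same idea as vmap_block_vaddr(): the sub-allocation that starts pages_off
 * pages into the block beginning at va_start must stay inside that block.
 */
static unsigned long block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr = va_start + (pages_off << PAGE_SHIFT);

	assert(block_idx(addr) == block_idx(va_start));
	return addr;
}

int main(void)
{
	unsigned long va_start = 4 * VMAP_BLOCK_SIZE;	/* block-aligned start */
	unsigned long order = 2;			/* request 2^2 = 4 pages */
	unsigned long free = VMAP_BBMAP_BITS;		/* fresh block */

	/* First allocation: pages_off = VMAP_BBMAP_BITS - free = 0 */
	printf("first  vaddr: %#lx\n", block_vaddr(va_start, VMAP_BBMAP_BITS - free));
	free -= 1UL << order;

	/* Second allocation starts right after the first one */
	printf("second vaddr: %#lx\n", block_vaddr(va_start, VMAP_BBMAP_BITS - free));
	return 0;
}

This mirrors how the patched vb_alloc() computes pages_off = VMAP_BBMAP_BITS - vb->free before decrementing vb->free, so consecutive requests carve out consecutive page ranges from the front of the block.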