|
@@ -91,7 +91,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
int i;
|
|
|
unsigned long dma_attrs = 0;
|
|
|
struct scatterlist *sg, *sg_list_start;
|
|
|
- int need_release = 0;
|
|
|
unsigned int gup_flags = FOLL_WRITE;
|
|
|
|
|
|
if (dmasync)
|
|
@@ -120,10 +119,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
|
|
|
if (access & IB_ACCESS_ON_DEMAND) {
|
|
|
ret = ib_umem_odp_get(context, umem, access);
|
|
|
- if (ret) {
|
|
|
- kfree(umem);
|
|
|
- return ERR_PTR(ret);
|
|
|
- }
|
|
|
+ if (ret)
|
|
|
+ goto umem_kfree;
|
|
|
return umem;
|
|
|
}
|
|
|
|
|
@@ -134,8 +131,8 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
|
|
|
page_list = (struct page **) __get_free_page(GFP_KERNEL);
|
|
|
if (!page_list) {
|
|
|
- kfree(umem);
|
|
|
- return ERR_PTR(-ENOMEM);
|
|
|
+ ret = -ENOMEM;
|
|
|
+ goto umem_kfree;
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -155,7 +152,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
|
|
|
up_write(&current->mm->mmap_sem);
|
|
|
ret = -ENOMEM;
|
|
|
- goto out;
|
|
|
+ goto vma;
|
|
|
}
|
|
|
up_write(&current->mm->mmap_sem);
|
|
|
|
|
@@ -163,17 +160,16 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
|
|
|
if (npages == 0 || npages > UINT_MAX) {
|
|
|
ret = -EINVAL;
|
|
|
- goto out;
|
|
|
+ goto vma;
|
|
|
}
|
|
|
|
|
|
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
|
|
|
if (ret)
|
|
|
- goto out;
|
|
|
+ goto vma;
|
|
|
|
|
|
if (!umem->writable)
|
|
|
gup_flags |= FOLL_FORCE;
|
|
|
|
|
|
- need_release = 1;
|
|
|
sg_list_start = umem->sg_head.sgl;
|
|
|
|
|
|
down_read(&current->mm->mmap_sem);
|
|
@@ -184,7 +180,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
gup_flags, page_list, vma_list);
|
|
|
if (ret < 0) {
|
|
|
up_read(&current->mm->mmap_sem);
|
|
|
- goto out;
|
|
|
+ goto umem_release;
|
|
|
}
|
|
|
|
|
|
umem->npages += ret;
|
|
@@ -211,26 +207,26 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|
|
|
|
|
if (!umem->nmap) {
|
|
|
ret = -ENOMEM;
|
|
|
- goto out;
|
|
|
+ goto umem_release;
|
|
|
}
|
|
|
|
|
|
ret = 0;
|
|
|
+ goto out;
|
|
|
|
|
|
+umem_release:
|
|
|
+ __ib_umem_release(context->device, umem, 0);
|
|
|
+vma:
|
|
|
+ down_write(&current->mm->mmap_sem);
|
|
|
+ current->mm->pinned_vm -= ib_umem_num_pages(umem);
|
|
|
+ up_write(&current->mm->mmap_sem);
|
|
|
out:
|
|
|
- if (ret < 0) {
|
|
|
- down_write(&current->mm->mmap_sem);
|
|
|
- current->mm->pinned_vm -= ib_umem_num_pages(umem);
|
|
|
- up_write(&current->mm->mmap_sem);
|
|
|
- if (need_release)
|
|
|
- __ib_umem_release(context->device, umem, 0);
|
|
|
- kfree(umem);
|
|
|
- }
|
|
|
-
|
|
|
if (vma_list)
|
|
|
free_page((unsigned long) vma_list);
|
|
|
free_page((unsigned long) page_list);
|
|
|
-
|
|
|
- return ret < 0 ? ERR_PTR(ret) : umem;
|
|
|
+umem_kfree:
|
|
|
+ if (ret)
|
|
|
+ kfree(umem);
|
|
|
+ return ret ? ERR_PTR(ret) : umem;
|
|
|
}
|
|
|
EXPORT_SYMBOL(ib_umem_get);
|
|
|
|