@@ -1138,144 +1138,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	return 0;
 }
 
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
-	/* vma_open is called when a new VMA is created on top of our VMA.
-	 * This is done through either mremap flow or split_vma (usually due
-	 * to mlock, madvise, munmap, etc.). We do not support a clone of the
-	 * vma, as this VMA is strongly hardware related. Therefore we set the
-	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
-	 * calling us again and trying to do incorrect actions. We assume that
-	 * the original vma size is exactly a single page that there will be no
-	 * "splitting" operations on.
-	 */
-	area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
-	struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
-	/* It's guaranteed that all VMAs opened on a FD are closed before the
-	 * file itself is closed, therefore no sync is needed with the regular
-	 * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
-	 * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
-	 * The close operation is usually called under mm->mmap_sem except when
-	 * process is exiting. The exiting case is handled explicitly as part
-	 * of mlx4_ib_disassociate_ucontext.
-	 */
-	mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
-				area->vm_private_data;
-
-	/* set the vma context pointer to null in the mlx4_ib driver's private
-	 * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
-	 */
-	mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
-	.open = mlx4_ib_vma_open,
-	.close = mlx4_ib_vma_close
-};
-
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
-	int i;
-	struct vm_area_struct *vma;
-	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
-	/* need to protect from a race on closing the vma as part of
-	 * mlx4_ib_vma_close().
-	 */
-	for (i = 0; i < HW_BAR_COUNT; i++) {
-		vma = context->hw_bar_info[i].vma;
-		if (!vma)
-			continue;
-
-		zap_vma_ptes(context->hw_bar_info[i].vma,
-			     context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
-		context->hw_bar_info[i].vma->vm_flags &=
-			~(VM_SHARED | VM_MAYSHARE);
-		/* context going to be destroyed, should not access ops any more */
-		context->hw_bar_info[i].vma->vm_ops = NULL;
-	}
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
-				 struct mlx4_ib_vma_private_data *vma_private_data)
-{
-	vma_private_data->vma = vma;
-	vma->vm_private_data = vma_private_data;
-	vma->vm_ops = &mlx4_ib_vm_ops;
 }
 
 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
 	struct mlx4_ib_dev *dev = to_mdev(context->device);
-	struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
-
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-		return -EINVAL;
-
-	if (vma->vm_pgoff == 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_DB].vma)
-			return -EINVAL;
-
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
 
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+	switch (vma->vm_pgoff) {
+	case 0:
+		return rdma_user_mmap_io(context, vma,
+					 to_mucontext(context)->uar.pfn,
+					 PAGE_SIZE,
+					 pgprot_noncached(vma->vm_page_prot));
 
-	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+	case 1:
+		if (dev->dev->caps.bf_reg_size == 0)
 			return -EINVAL;
+		return rdma_user_mmap_io(
+			context, vma,
+			to_mucontext(context)->uar.pfn +
+				dev->dev->caps.num_uars,
+			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
 
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       to_mucontext(context)->uar.pfn +
-				       dev->dev->caps.num_uars,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
-	} else if (vma->vm_pgoff == 3) {
+	case 3: {
 		struct mlx4_clock_params params;
 		int ret;
 
-		/* We prevent double mmaping on same context */
-		if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
-			return -EINVAL;
-
 		ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
 		if (ret)
 			return ret;
 
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (pci_resource_start(dev->dev->persist->pdev,
-							   params.bar) +
-					params.offset)
-				       >> PAGE_SHIFT,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx4_ib_set_vma_data(vma,
-				     &mucontext->hw_bar_info[HW_BAR_CLOCK]);
-	} else {
-		return -EINVAL;
+		return rdma_user_mmap_io(
+			context, vma,
+			(pci_resource_start(dev->dev->persist->pdev,
+					    params.bar) +
+			 params.offset) >>
+				PAGE_SHIFT,
+			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
 	}
 
-	return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
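For reference, the vm_pgoff encoding that the new switch keys on is unchanged: page offset 0 selects the UAR doorbell page, offset 1 the BlueFlame page (only when bf_reg_size is non-zero), and offset 3 the internal clock page. The explicit single-page check, the per-BAR "double mmap" guards and the zapping loop in mlx4_ib_disassociate_ucontext() can all go away because rdma_user_mmap_io() records each VMA in the uverbs core, which zaps the mappings itself on disassociate. Below is a rough userspace-side sketch of how a provider library would pick these offsets; the map_mlx4_bar_page() helper, the cmd_fd name and the prot values are illustrative and not part of this patch.

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: cmd_fd is assumed to be the open uverbs command fd
 * backing this ib_ucontext; pgoff is in units of the system page size and
 * becomes vma->vm_pgoff in the kernel handler above.
 */
static void *map_mlx4_bar_page(int cmd_fd, unsigned long pgoff, int prot)
{
	long psz = sysconf(_SC_PAGESIZE);

	return mmap(NULL, psz, prot, MAP_SHARED, cmd_fd, (off_t)pgoff * psz);
}

/* Typical use: pgoff 0 (doorbell) and 1 (BlueFlame) mapped writable,
 * pgoff 3 (HCA clock) mapped read-only.
 */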