@@ -38,6 +38,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#if defined(CONFIG_X86)
+#include <asm/pat.h>
+#endif
 #include <linux/sched.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_addr.h>
@@ -1068,38 +1071,89 @@ static int get_index(unsigned long offset)
 	return get_arg(offset);
 }
 
+static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+{
+	switch (cmd) {
+	case MLX5_IB_MMAP_WC_PAGE:
+		return "WC";
+	case MLX5_IB_MMAP_REGULAR_PAGE:
+		return "best effort WC";
+	case MLX5_IB_MMAP_NC_PAGE:
+		return "NC";
+	default:
+		return NULL;
+	}
+}
+
+static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+		    struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+{
+	int err;
+	unsigned long idx;
+	phys_addr_t pfn, pa;
+	pgprot_t prot;
+
+	switch (cmd) {
+	case MLX5_IB_MMAP_WC_PAGE:
+/* Some architectures don't support WC memory */
+#if defined(CONFIG_X86)
+		if (!pat_enabled())
+			return -EPERM;
+#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
+		return -EPERM;
+#endif
+	/* fall through */
+	case MLX5_IB_MMAP_REGULAR_PAGE:
+		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
+		prot = pgprot_writecombine(vma->vm_page_prot);
+		break;
+	case MLX5_IB_MMAP_NC_PAGE:
+		prot = pgprot_noncached(vma->vm_page_prot);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
+
+	idx = get_index(vma->vm_pgoff);
+	if (idx >= uuari->num_uars)
+		return -EINVAL;
+
+	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
+
+	vma->vm_page_prot = prot;
+	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
+				 PAGE_SIZE, vma->vm_page_prot);
+	if (err) {
+		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
+			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
+		return -EAGAIN;
+	}
+
+	pa = pfn << PAGE_SHIFT;
+	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
+		    vma->vm_start, &pa);
+
+	return 0;
+}
+
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 {
 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
 	struct mlx5_uuar_info *uuari = &context->uuari;
 	unsigned long command;
-	unsigned long idx;
 	phys_addr_t pfn;
 
 	command = get_command(vma->vm_pgoff);
 	switch (command) {
+	case MLX5_IB_MMAP_WC_PAGE:
+	case MLX5_IB_MMAP_NC_PAGE:
 	case MLX5_IB_MMAP_REGULAR_PAGE:
-		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-			return -EINVAL;
-
-		idx = get_index(vma->vm_pgoff);
-		if (idx >= uuari->num_uars)
-			return -EINVAL;
-
-		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
-		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
-			    (unsigned long long)pfn);
-
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
-				       PAGE_SIZE, vma->vm_page_prot))
-			return -EAGAIN;
-
-		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
-			    vma->vm_start,
-			    (unsigned long long)pfn << PAGE_SHIFT);
-		break;
+		return uar_mmap(dev, command, vma, uuari);
 
 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
 		return -ENOSYS;