@@ -263,24 +263,45 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 #define __ttm_kunmap_atomic(__addr) vunmap(__addr)
 #endif
 
-static void *ttm_kmap_atomic_prot(struct page *page,
-				  pgprot_t prot)
+
+/**
+ * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
+ * specified page protection.
+ *
+ * @page: The page to map.
+ * @prot: The page protection.
+ *
+ * This function maps a TTM page using the kmap_atomic api if available,
+ * otherwise falls back to vmap. The user must make sure that the
+ * specified page does not have an aliased mapping with a different caching
+ * policy unless the architecture explicitly allows it. Also mapping and
+ * unmapping using this api must be correctly nested. Unmapping should
+ * occur in the reverse order of mapping.
+ */
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 		return kmap_atomic(page);
 	else
 		return __ttm_kmap_atomic_prot(page, prot);
 }
+EXPORT_SYMBOL(ttm_kmap_atomic_prot);
 
-
-static void ttm_kunmap_atomic_prot(void *addr,
-				   pgprot_t prot)
+/**
+ * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
+ * ttm_kmap_atomic_prot.
+ *
+ * @addr: The virtual address from the map.
+ * @prot: The page protection.
+ */
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
 {
 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 		kunmap_atomic(addr);
 	else
 		__ttm_kunmap_atomic(addr);
 }
+EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,