@@ -62,7 +62,7 @@ static void print_use(void)
 	int i;
 	int j = 0;
 
-	printk("dvma entry usage:\n");
+	pr_info("dvma entry usage:\n");
 
 	for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
 		if(!iommu_use[i])
@@ -70,16 +70,15 @@ static void print_use(void)
 
 		j++;
 
-		printk("dvma entry: %08lx len %08lx\n",
-			( i << DVMA_PAGE_SHIFT) + DVMA_START,
-			iommu_use[i]);
+		pr_info("dvma entry: %08x len %08lx\n",
+			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
 	}
 
-	printk("%d entries in use total\n", j);
+	pr_info("%d entries in use total\n", j);
 
-	printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
-	printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
-		dvma_free_bytes);
+	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
+	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
+		dvma_free_bytes);
 }
 
 static void print_holes(struct list_head *holes)
@@ -88,18 +87,18 @@ static void print_holes(struct list_head *holes)
 	struct list_head *cur;
 	struct hole *hole;
 
-	printk("listing dvma holes\n");
+	pr_info("listing dvma holes\n");
 	list_for_each(cur, holes) {
 		hole = list_entry(cur, struct hole, list);
 
 		if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
 			continue;
 
-		printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size);
+		pr_info("hole: start %08lx end %08lx size %08lx\n",
+			hole->start, hole->end, hole->size);
 	}
 
-	printk("end of hole listing...\n");
-
+	pr_info("end of hole listing...\n");
 }
 #endif /* DVMA_DEBUG */
 
@@ -137,7 +136,7 @@ static inline struct hole *rmcache(void)
 
 	if(list_empty(&hole_cache)) {
 		if(!refill()) {
-			printk("out of dvma hole cache!\n");
+			pr_crit("out of dvma hole cache!\n");
 			BUG();
 		}
 	}
@@ -157,7 +156,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
 
 	if(list_empty(&hole_list)) {
 #ifdef DVMA_DEBUG
-		printk("out of dvma holes! (printing hole cache)\n");
+		pr_crit("out of dvma holes! (printing hole cache)\n");
 		print_holes(&hole_cache);
 		print_use();
 #endif
@@ -195,7 +194,7 @@ static inline unsigned long get_baddr(int len, unsigned long align)
 
 	}
 
-	printk("unable to find dvma hole!\n");
+	pr_crit("unable to find dvma hole!\n");
 	BUG();
 	return 0;
 }
@@ -287,15 +286,12 @@ unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
 		len = 0x800;
 
 	if(!kaddr || !len) {
-//		printk("error: kaddr %lx len %x\n", kaddr, len);
+//		pr_err("error: kaddr %lx len %x\n", kaddr, len);
 //		*(int *)4 = 0;
 		return 0;
 	}
 
-#ifdef DEBUG
-	printk("dvma_map request %08lx bytes from %08lx\n",
-		len, kaddr);
-#endif
+	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
 	off = kaddr & ~DVMA_PAGE_MASK;
 	kaddr &= PAGE_MASK;
 	len += off;
@@ -307,12 +303,13 @@ unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
 	align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
 
 	baddr = get_baddr(len, align);
-//	printk("using baddr %lx\n", baddr);
+//	pr_info("using baddr %lx\n", baddr);
 
 	if(!dvma_map_iommu(kaddr, baddr, len))
 		return (baddr + off);
 
-	printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
+	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
+		len);
 	BUG();
 	return 0;
 }
@@ -343,9 +340,7 @@ void *dvma_malloc_align(unsigned long len, unsigned long align)
 	if(!len)
 		return NULL;
 
-#ifdef DEBUG
-	printk("dvma_malloc request %lx bytes\n", len);
-#endif
+	pr_debug("dvma_malloc request %lx bytes\n", len);
 	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);
 
 	if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
@@ -364,10 +359,8 @@ void *dvma_malloc_align(unsigned long len, unsigned long align)
 		return NULL;
 	}
 
-#ifdef DEBUG
-	printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
-		len, kaddr, baddr);
-#endif
+	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
+		 baddr);
 
 	return (void *)vaddr;
 