
swiotlb: Add swiotlb_free() function

The swiotlb_free() function frees all the memory allocated for swiotlb.

We need to initialize swiotlb before IOMMU initialization (x86
and powerpc need to allocate memory from the bootmem allocator).
If IOMMU initialization succeeds, we then need to free the swiotlb
resources (we don't want to waste 64MB).
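
For illustration only, a minimal sketch of the intended call pattern; arch_dma_init() and arch_hw_iommu_init() are hypothetical stand-ins for the real x86/powerpc call sites and are not part of this patch:

#include <linux/init.h>
#include <linux/swiotlb.h>

/* Hypothetical hook standing in for the real x86/powerpc IOMMU init. */
extern int arch_hw_iommu_init(void);

/*
 * Sketch (not part of this patch): bring up swiotlb early from the
 * bootmem allocator, then give the ~64MB back if a hardware IOMMU
 * comes up and the bounce buffers are not needed.
 */
void __init arch_dma_init(void)
{
	swiotlb_init();

	if (arch_hw_iommu_init() == 0)
		swiotlb_free();
}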

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: chrisw@sous-sol.org
Cc: dwmw2@infradead.org
Cc: joerg.roedel@amd.com
Cc: muli@il.ibm.com
LKML-Reference: <1257849980-22640-8-git-send-email-fujita.tomonori@lab.ntt.co.jp>
[ -v2: build fix for the !CONFIG_SWIOTLB case ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
FUJITA Tomonori
commit 5740afdb68
2 changed files with 36 additions and 0 deletions:
  1. include/linux/swiotlb.h (+6, -0)
  2. lib/swiotlb.c (+30, -0)

include/linux/swiotlb.h (+6, -0)

@@ -88,4 +88,10 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern int
 swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
+#ifdef CONFIG_SWIOTLB
+extern void __init swiotlb_free(void);
+#else
+static inline void swiotlb_free(void) { }
+#endif
+
 #endif /* __LINUX_SWIOTLB_H */

lib/swiotlb.c (+30, -0)

@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
  */
 static DEFINE_SPINLOCK(io_tlb_lock);
 
+static int late_alloc;
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -262,6 +264,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 
 	swiotlb_print_info(bytes);
 
+	late_alloc = 1;
+
 	return 0;
 
 cleanup4:
@@ -281,6 +285,32 @@ cleanup1:
 	return -ENOMEM;
 }
 
+void __init swiotlb_free(void)
+{
+	if (!io_tlb_overflow_buffer)
+		return;
+
+	if (late_alloc) {
+		free_pages((unsigned long)io_tlb_overflow_buffer,
+			   get_order(io_tlb_overflow));
+		free_pages((unsigned long)io_tlb_orig_addr,
+			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+								 sizeof(int)));
+		free_pages((unsigned long)io_tlb_start,
+			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+	} else {
+		free_bootmem_late(__pa(io_tlb_overflow_buffer),
+				  io_tlb_overflow);
+		free_bootmem_late(__pa(io_tlb_orig_addr),
+				  io_tlb_nslabs * sizeof(phys_addr_t));
+		free_bootmem_late(__pa(io_tlb_list),
+				  io_tlb_nslabs * sizeof(int));
+		free_bootmem_late(__pa(io_tlb_start),
+				  io_tlb_nslabs << IO_TLB_SHIFT);
+	}
+}
+
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
 	return paddr >= virt_to_phys(io_tlb_start) &&