@@ -322,11 +322,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	build_fail = ppc_md.tce_build(tbl, entry, npages,
+	build_fail = tbl->it_ops->set(tbl, entry, npages,
 				      (unsigned long)page &
 				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 
-	/* ppc_md.tce_build() only returns non-zero for transient errors.
+	/* tbl->it_ops->set() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
 	 * DMA_ERROR_CODE. For all other errors the functionality is
 	 * not altered.
@@ -337,8 +337,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 
 	/* Make sure updates are seen by hardware */
 	mb();
@@ -408,7 +408,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	if (!iommu_free_check(tbl, dma_addr, npages))
 		return;
 
-	ppc_md.tce_free(tbl, entry, npages);
+	tbl->it_ops->clear(tbl, entry, npages);
 
 	spin_lock_irqsave(&(pool->lock), flags);
 	bitmap_clear(tbl->it_map, free_entry, npages);
@@ -424,8 +424,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	 * not do an mb() here on purpose, it is not needed on any of
 	 * the current platforms.
 	 */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 }
 
 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
@@ -495,7 +495,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		build_fail = ppc_md.tce_build(tbl, entry, npages,
+		build_fail = tbl->it_ops->set(tbl, entry, npages,
 					      vaddr & IOMMU_PAGE_MASK(tbl),
 					      direction, attrs);
 		if(unlikely(build_fail))
@@ -534,8 +534,8 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 
 	DBG("mapped %d elements:\n", outcount);
 
@@ -600,8 +600,8 @@ void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	 * do not do an mb() here, the affected platforms do not need it
 	 * when freeing.
 	 */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 }
 
 static void iommu_table_clear(struct iommu_table *tbl)
@@ -613,17 +613,17 @@ static void iommu_table_clear(struct iommu_table *tbl)
 	 */
 	if (!is_kdump_kernel() || is_fadump_active()) {
 		/* Clear the table in case firmware left allocations in it */
-		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 		return;
 	}
 
 #ifdef CONFIG_CRASH_DUMP
-	if (ppc_md.tce_get) {
+	if (tbl->it_ops->get) {
 		unsigned long index, tceval, tcecount = 0;
 
 		/* Reserve the existing mappings left by the first kernel. */
 		for (index = 0; index < tbl->it_size; index++) {
-			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 			/*
 			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 			 */
@@ -657,6 +657,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	unsigned int i;
 	struct iommu_pool *p;
 
+	BUG_ON(!tbl->it_ops);
+
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
@@ -929,8 +931,8 @@ EXPORT_SYMBOL_GPL(iommu_tce_direction);
 void iommu_flush_tce(struct iommu_table *tbl)
 {
 	/* Flush/invalidate TLB caches if necessary */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 
 	/* Make sure updates are seen by hardware */
 	mb();
@@ -941,7 +943,7 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
 		unsigned long ioba, unsigned long tce_value,
 		unsigned long npages)
 {
-	/* ppc_md.tce_free() does not support any value but 0 */
+	/* tbl->it_ops->clear() does not support any value but 0 */
 	if (tce_value)
 		return -EINVAL;
 
@@ -989,9 +991,9 @@ unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
 
 	spin_lock(&(pool->lock));
 
-	oldtce = ppc_md.tce_get(tbl, entry);
+	oldtce = tbl->it_ops->get(tbl, entry);
 	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
-		ppc_md.tce_free(tbl, entry, 1);
+		tbl->it_ops->clear(tbl, entry, 1);
 	else
 		oldtce = 0;
 
@@ -1014,10 +1016,10 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
 	spin_lock(&(pool->lock));
 
-	oldtce = ppc_md.tce_get(tbl, entry);
+	oldtce = tbl->it_ops->get(tbl, entry);
 	/* Add new entry if it is not busy */
 	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
-		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);
+		ret = tbl->it_ops->set(tbl, entry, 1, hwaddr, direction, NULL);
 
 	spin_unlock(&(pool->lock));
 
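
Note: the iommu_table_ops structure referenced through tbl->it_ops is defined outside the hunks shown here. The sketch below is inferred from the call sites in this diff; the parameter names and exact types are assumptions, not copied from the real header.

/* Sketch of the per-table callback structure assumed above; signatures
 * are reconstructed from how set/clear/get/flush are invoked in this
 * diff, so the real definition may differ in detail.
 */
struct iommu_table_ops {
	/* was ppc_md.tce_build(): install npages TCEs starting at index */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			struct dma_attrs *attrs);
	/* was ppc_md.tce_free(): clear npages TCEs starting at index */
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* was ppc_md.tce_get(): read a TCE back (kdump reservation path) */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	/* was ppc_md.tce_flush(): flush/invalidate the hardware TLB cache */
	void (*flush)(struct iommu_table *tbl);
};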