@@ -1841,8 +1841,6 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
-	struct device *dma_device;
-
 	char name[IB_DEVICE_NAME_MAX];
 
 	struct list_head event_handler_list;
@@ -2969,7 +2967,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->mapping_error(dev, dma_addr);
-	return dma_mapping_error(dev->dma_device, dma_addr);
+	return dma_mapping_error(&dev->dev, dma_addr);
 }
 
 /**
@@ -2985,7 +2983,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
-	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+	return dma_map_single(&dev->dev, cpu_addr, size, direction);
 }
 
 /**
@@ -3002,7 +3000,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_single(dev, addr, size, direction);
 	else
-		dma_unmap_single(dev->dma_device, addr, size, direction);
+		dma_unmap_single(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3021,7 +3019,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_page(dev, page, offset, size, direction);
-	return dma_map_page(dev->dma_device, page, offset, size, direction);
+	return dma_map_page(&dev->dev, page, offset, size, direction);
 }
 
 /**
@@ -3038,7 +3036,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_page(dev, addr, size, direction);
 	else
-		dma_unmap_page(dev->dma_device, addr, size, direction);
+		dma_unmap_page(&dev->dev, addr, size, direction);
 }
 
 /**
@@ -3054,7 +3052,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 {
 	if (dev->dma_ops)
 		return dev->dma_ops->map_sg(dev, sg, nents, direction);
-	return dma_map_sg(dev->dma_device, sg, nents, direction);
+	return dma_map_sg(&dev->dev, sg, nents, direction);
 }
 
 /**
@@ -3071,7 +3069,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
 	else
-		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+		dma_unmap_sg(&dev->dev, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3082,9 +3080,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 	if (dev->dma_ops)
 		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
 						  dma_attrs);
-	else
-		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-					dma_attrs);
+	return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3096,8 +3092,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
 						    dma_attrs);
 	else
-		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
-				   dma_attrs);
+		dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 /**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3142,7 +3137,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
 	else
-		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+		dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3160,7 +3155,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
 	else
-		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+		dma_sync_single_for_device(&dev->dev, addr, size, dir);
 }
 
 /**
@@ -3183,7 +3178,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 		*dma_handle = handle;
 		return ret;
 	}
-	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+	return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
 }
 
 /**
@@ -3200,7 +3195,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 	if (dev->dma_ops)
 		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
 	else
-		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+		dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
 }
 
 /**
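
For reference, here is a minimal caller-side sketch of the wrappers touched above; the function name example_post_buf is hypothetical and not part of this patch. The point of the change is that the non-dma_ops path now maps against the struct device embedded in ib_device (&dev->dev) rather than the removed dev->dma_device pointer, while upper-layer callers keep using the same ib_dma_* API unchanged:

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Hypothetical example, not part of the patch: map a kernel buffer for
 * a send, check the mapping, and unmap it again. This code behaves the
 * same before and after the patch, since only the ib_dma_* internals
 * changed. */
static int example_post_buf(struct ib_device *dev, void *buf, size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... build and post a send work request using dma_addr ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	return 0;
}

Using the embedded struct device directly removes one level of indirection and drops a field that otherwise has to be kept in sync with the underlying device.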