@@ -1127,26 +1127,33 @@ EXPORT_SYMBOL_GPL(iommu_unmap);
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			    struct scatterlist *sg, unsigned int nents, int prot)
 {
-	int ret;
+	struct scatterlist *s;
 	size_t mapped = 0;
 	unsigned int i;
-	struct scatterlist *s;
+	int ret;
 
 	for_each_sg(sg, s, nents, i) {
 		phys_addr_t phys = page_to_phys(sg_page(s));
-		size_t page_len = s->offset + s->length;
 
-		ret = iommu_map(domain, iova + mapped, phys, page_len, prot);
-		if (ret) {
-			/* undo mappings already done */
-			iommu_unmap(domain, iova, mapped);
-			mapped = 0;
-			break;
-		}
-		mapped += page_len;
+		/* We are mapping on page boundaries, so offset must be 0 */
+		if (s->offset)
+			goto out_err;
+
+		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
+		if (ret)
+			goto out_err;
+
+		mapped += s->length;
 	}
 
 	return mapped;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, mapped);
+
+	return 0;
+
 }
 EXPORT_SYMBOL_GPL(default_iommu_map_sg);
 
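For context, here is a minimal, hypothetical caller-side sketch (not part of the patch) of how the new contract could be consumed through the five-argument iommu_map_sg() wrapper of this kernel era: every scatterlist entry must start on a page boundary, and on any failure the helper unwinds whatever it had already mapped and returns 0, so a caller only needs to compare the returned byte count against the expected total. The function name example_map_table, the total_len parameter, and the IOMMU_READ | IOMMU_WRITE prot flags are illustrative assumptions, not taken from the patch.

/*
 * Hypothetical caller sketch, not from the patch: map a page-aligned
 * sg_table and treat a short return from iommu_map_sg() as failure.
 * Assumes the five-argument iommu_map_sg() wrapper of this era, which
 * returns the number of bytes mapped, or 0 when the default
 * implementation above takes the out_err path.
 */
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int example_map_table(struct iommu_domain *domain, unsigned long iova,
			     struct sg_table *sgt, size_t total_len)
{
	size_t mapped;

	/*
	 * Each entry must have offset == 0; otherwise the new code bails
	 * out, unmaps everything mapped so far and returns 0.
	 */
	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (mapped < total_len)
		return -ENOMEM;	/* nothing is left mapped on failure */

	return 0;
}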