diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 98ba927aee1a..168434cf920a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -972,7 +972,7 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 out_unmap:
 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EIO;
 }
 
 /*
@@ -993,11 +993,14 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
+	ssize_t ret;
 	int i;
 
-	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_deferred_attach(dev, domain))
-		return 0;
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
+		ret = iommu_deferred_attach(dev, domain);
+		if (ret)
+			goto out;
+	}
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -1045,14 +1048,17 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova)
+	if (!iova) {
+		ret = -ENOMEM;
 		goto out_restore_sg;
+	}
 
 	/*
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
+	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+	if (ret < iova_len)
 		goto out_free_iova;
 
 	return __finalise_sg(dev, sg, nents, iova);
@@ -1061,7 +1067,10 @@ out_free_iova:
 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
-	return 0;
+out:
+	if (ret != -ENOMEM)
+		return -EINVAL;
+	return ret;
 }
 
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
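
For context, a minimal caller-side sketch (hypothetical driver code, not part of this patch) of what the change enables once the dma-mapping core forwards these errnos: dma_map_sgtable() can report failure as a distinct negative error code instead of an opaque zero entry count. The example_map_buffer() helper is made up for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: a driver observing the error propagated from the
 * IOMMU DMA path (e.g. -EINVAL, -ENOMEM or -EIO) via the sgtable API.
 */
static int example_map_buffer(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* e.g. -ENOMEM when IOVA allocation fails */

	/* ... hand the mapped entries in sgt->sgl to the hardware ... */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}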