dma-mapping updates for Linux 5.15
- fix debugfs initialization order (Anthony Iliopoulos)
- use memory_intersects() directly (Kefeng Wang)
- allow to return specific errors from ->map_sg (Logan Gunthorpe, Martin Oliveira)
- turn the dma_map_sg return value into an unsigned int (me)
- provide a common global coherent pool implementation (me)
-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmEvY+8LHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYPaehAAsgnBzzzoLHO83pgs0KL92c+0DiMNHYmaMCJOvZXk
x2Irv+O74WikRJc4S7uQ26p2spjmUxjmiOjld+8+NN0liD4QO9BQ/SZpIp8emuKS
/yPG6Xh86xSl/OrPL1y7kGeHkRi5sm3mRhcTdILFQFPLcSReupe++GRfnvrpbOPk
tj3pBGXluD6iJH12BBt00ushUVzZ0F2xaF6xUDAs94RSZ3tlqsfx6c928Y1KxSZh
f89q/KuaokyogFG7Ujj/nYgIUETaIs2W6UmxBfRzdEMJFSffwomUMbw+M+qGJ7/d
2UjamFYRX16FReE8WNsndbX1E6k5JBW12E1qwV3dUwatlNLWEaRq3PNiWkF7zcFH
LDkpDYN6s5bIDPTfDp21XfPygoH8KQhnD9lVf0aB7n04uu8VJrGB9+10PpkCJVXD
0b2dcuSwCO7hAfTfNGVV8f3EI/1XPflr1hJvMgcVtY53CR96ldp+4QaElzWLXumN
MyptirmrVITNVyVwGzhGAblXBLWdarXD0EXudyiaF4Xbrj3AkIOSUCghEwKLpjQf
UwMFFwSE8yGxKTRK4HfU5gMzy6G751fU7TUe5lmxZLovDflQoSXMWgHE8e7r0Qel
o5v6lmUzoWz2fAISf3xjauo2ncgmfWMwYM6C7OJy5nG73QXLQId9J+ReXbmrgrrN
DgI=
=spje
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - fix debugfs initialization order (Anthony Iliopoulos)
 - use memory_intersects() directly (Kefeng Wang)
 - allow to return specific errors from ->map_sg (Logan Gunthorpe, Martin Oliveira)
 - turn the dma_map_sg return value into an unsigned int (me)
 - provide a common global coherent pool implementation (me)

* tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping: (31 commits)
  hexagon: use the generic global coherent pool
  dma-mapping: make the global coherent pool conditional
  dma-mapping: add a dma_init_global_coherent helper
  dma-mapping: simplify dma_init_coherent_memory
  dma-mapping: allow using the global coherent pool for !ARM
  ARM/nommu: use the generic dma-direct code for non-coherent devices
  dma-direct: add support for dma_coherent_default_memory
  dma-mapping: return an unsigned int from dma_map_sg{,_attrs}
  dma-mapping: disallow .map_sg operations from returning zero on error
  dma-mapping: return error code from dma_dummy_map_sg()
  x86/amd_gart: don't set failed sg dma_address to DMA_MAPPING_ERROR
  x86/amd_gart: return error code from gart_map_sg()
  xen: swiotlb: return error code from xen_swiotlb_map_sg()
  parisc: return error code from .map_sg() ops
  sparc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  sparc/iommu: return error codes from .map_sg() ops
  s390/pci: don't set failed sg dma_address to DMA_MAPPING_ERROR
  s390/pci: return error code from s390_dma_map_sg()
  powerpc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  powerpc/iommu: return error code from .map_sg() ops
  ...
commit 4a3bb4200a
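The functional core of this series is a change to the ->map_sg calling convention: implementations now report why a mapping failed instead of returning a bare 0, dma_map_sgtable() forwards that error code to its caller, and dma_map_sg() keeps the old "0 means failure" contract with the return type changed to unsigned int. The sketch below is not part of this merge; it is a hypothetical consumer (the my_* names are made up, the DMA APIs are the real ones touched here) showing how the two entry points are meant to be used after these patches. In particular, a driver that wants to tell a possibly transient -ENOMEM apart from hard failures should use dma_map_sgtable().

/*
 * Hedged sketch, not from this merge: hypothetical consumers of the two
 * calling conventions.  my_map_table()/my_map_list() are made-up names;
 * dma_map_sgtable() and dma_map_sg() are the real interfaces.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* dma_map_sgtable() now forwards the specific error chosen by the ->map_sg
 * implementation (-EINVAL, -ENOMEM, -EIO, ...) instead of folding every
 * failure into -EINVAL. */
static int my_map_table(struct device *dev, struct sg_table *sgt)
{
	return dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
}

/* dma_map_sg() keeps its historical contract: number of mapped entries on
 * success and 0 on failure - only the return type became unsigned int. */
static int my_map_list(struct device *dev, struct scatterlist *sgl, int nents)
{
	unsigned int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	return mapped ? (int)mapped : -EIO;
}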
@@ -649,7 +649,9 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 		sg->dma_address
 		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
 				     sg->length, dac_allowed);
-		return sg->dma_address != DMA_MAPPING_ERROR;
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return -EIO;
+		return 1;
 	}
 
 	start = sg;
@@ -685,8 +687,10 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	if (out < end)
 		out->dma_length = 0;
 
-	if (out - start == 0)
+	if (out - start == 0) {
 		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+		return -ENOMEM;
+	}
 	DBGA("pci_map_sg: %ld entries\n", out - start);
 
 	return out - start;
@@ -699,7 +703,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	   entries. Unmap them now. */
 	if (out > start)
 		pci_unmap_sg(pdev, start, out - start, dir);
-	return 0;
+	return -ENOMEM;
 }
 
 /* Unmap a set of streaming mode DMA translations. Again, cpu read
@@ -18,8 +18,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
-	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -44,6 +44,7 @@ config ARM
 	select CPU_PM if SUSPEND || CPU_IDLE
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
+	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
 	select DMA_REMAP if MMU
 	select EDAC_SUPPORT
@@ -5,12 +5,7 @@
  *  Copyright (C) 2000-2004 Russell King
  */
 
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
 
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
@@ -18,64 +13,7 @@
 
 #include "dma.h"
 
-/*
- * The generic direct mapping code is used if
- *   - MMU/MPU is off
- *   - cpu is v7m w/o cache support
- *   - device is coherent
- * otherwise arm_nommu_dma_ops is used.
- *
- * arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
- * [1] on how to declare such memory).
- *
- * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- */
-
-static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-
-{
-	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
-
-	/*
-	 * dma_alloc_from_global_coherent() may fail because:
-	 *
-	 * - no consistent DMA region has been defined, so we can't
-	 *   continue.
-	 * - there is no space left in consistent DMA region, so we
-	 *   only can fallback to generic allocator if we are
-	 *   advertised that consistency is not required.
-	 */
-
-	WARN_ON_ONCE(ret == NULL);
-	return ret;
-}
-
-static void arm_nommu_dma_free(struct device *dev, size_t size,
-			       void *cpu_addr, dma_addr_t dma_addr,
-			       unsigned long attrs)
-{
-	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
-
-	WARN_ON_ONCE(ret == 0);
-}
-
-static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      unsigned long attrs)
-{
-	int ret;
-
-	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
-		return ret;
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-	return -ENXIO;
-}
-
-
-static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 			      enum dma_data_direction dir)
 {
 	dmac_map_area(__va(paddr), size, dir);
@@ -86,7 +24,7 @@ static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 	outer_clean_range(paddr, paddr + size);
 }
 
-static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 			      enum dma_data_direction dir)
 {
 	if (dir != DMA_TO_DEVICE) {
@@ -95,102 +33,6 @@ static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
 	}
 }
 
-static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir,
-					 unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	__dma_page_cpu_to_dev(handle, size, dir);
-
-	return handle;
-}
-
-static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	__dma_page_dev_to_cpu(handle, size, dir);
-}
-
-
-static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-	}
-
-	return nents;
-}
-
-static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-const struct dma_map_ops arm_nommu_dma_ops = {
-	.alloc = arm_nommu_dma_alloc,
-	.free = arm_nommu_dma_free,
-	.alloc_pages = dma_direct_alloc_pages,
-	.free_pages = dma_direct_free_pages,
-	.mmap = arm_nommu_dma_mmap,
-	.map_page = arm_nommu_dma_map_page,
-	.unmap_page = arm_nommu_dma_unmap_page,
-	.map_sg = arm_nommu_dma_map_sg,
-	.unmap_sg = arm_nommu_dma_unmap_sg,
-	.sync_single_for_device = arm_nommu_dma_sync_single_for_device,
-	.sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu,
-	.sync_sg_for_device = arm_nommu_dma_sync_sg_for_device,
-	.sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu,
-};
-EXPORT_SYMBOL(arm_nommu_dma_ops);
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -201,14 +43,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		 * enough to check if MPU is in use or not since in absense of
 		 * MPU system memory map is used.
 		 */
-		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+		dev->dma_coherent = cacheid ? coherent : true;
 	} else {
 		/*
 		 * Assume coherent DMA in case MMU/MPU has not been set up.
 		 */
-		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+		dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-
-	if (!dev->archdata.dma_coherent)
-		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
@@ -980,7 +980,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
-	int i, j;
+	int i, j, ret;
 
 	for_each_sg(sg, s, nents, i) {
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -988,15 +988,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 #endif
 		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
 						s->length, dir, attrs);
-		if (dma_mapping_error(dev, s->dma_address))
+		if (dma_mapping_error(dev, s->dma_address)) {
+			ret = -EIO;
 			goto bad_mapping;
 		}
+	}
 	return nents;
 
 bad_mapping:
 	for_each_sg(sg, s, i, j)
 		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
-	return 0;
+	return ret;
 }
 
 /**
@@ -1622,7 +1624,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 			  bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
-	int i, count = 0;
+	int i, count = 0, ret;
 	unsigned int offset = s->offset;
 	unsigned int size = s->offset + s->length;
 	unsigned int max = dma_get_max_seg_size(dev);
@@ -1630,12 +1632,13 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
-			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs, is_coherent) < 0)
+			ret = __map_sg_chunk(dev, start, size,
+					&dma->dma_address, dir, attrs,
+					is_coherent);
+			if (ret < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1648,8 +1651,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-		is_coherent) < 0)
+	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+			is_coherent);
+	if (ret < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1660,7 +1664,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 bad_mapping:
 	for_each_sg(sg, s, count, i)
 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
-	return 0;
+	if (ret == -ENOMEM)
+		return ret;
+	return -EINVAL;
 }
 
 /**
@@ -7,6 +7,7 @@ config HEXAGON
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_PREEMPT
+	select DMA_GLOBAL_POOL
 	# Other pending projects/to-do items.
 	# select HAVE_REGS_AND_STACK_ACCESS_API
 	# select HAVE_HW_BREAKPOINT if PERF_EVENTS
@@ -7,54 +7,8 @@
 
 #include <linux/dma-map-ops.h>
 #include <linux/memblock.h>
-#include <linux/genalloc.h>
-#include <linux/module.h>
 #include <asm/page.h>
 
-static struct gen_pool *coherent_pool;
-
-
-/* Allocates from a pool of uncached memory that was reserved at boot time */
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		gfp_t flag, unsigned long attrs)
-{
-	void *ret;
-
-	/*
-	 * Our max_low_pfn should have been backed off by 16MB in
-	 * mm/init.c to create DMA coherent space. Use that as the VA
-	 * for the pool.
-	 */
-
-	if (coherent_pool == NULL) {
-		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
-
-		if (coherent_pool == NULL)
-			panic("Can't create %s() memory pool!", __func__);
-		else
-			gen_pool_add(coherent_pool,
-				(unsigned long)pfn_to_virt(max_low_pfn),
-				hexagon_coherent_pool_size, -1);
-	}
-
-	ret = (void *) gen_pool_alloc(coherent_pool, size);
-
-	if (ret) {
-		memset(ret, 0, size);
-		*dma_addr = (dma_addr_t) virt_to_phys(ret);
-	} else
-		*dma_addr = ~0;
-
-	return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
-}
-
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
@@ -77,3 +31,14 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		BUG();
 	}
 }
+
+/*
+ * Our max_low_pfn should have been backed off by 16MB in mm/init.c to create
+ * DMA coherent space. Use that for the pool.
+ */
+static int __init hexagon_dma_init(void)
+{
+	return dma_init_global_coherent(PFN_PHYS(max_low_pfn),
+					hexagon_coherent_pool_size);
+}
+core_initcall(hexagon_dma_init);
@@ -1459,7 +1459,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 		sglist->dma_address = sba_map_page(dev, sg_page(sglist),
 				sglist->offset, sglist->length, dir, attrs);
 		if (dma_mapping_error(dev, sglist->dma_address))
-			return 0;
+			return -EIO;
 		return 1;
 	}
 
@@ -1486,7 +1486,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
 	if (coalesced < 0) {
 		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-		return 0;
+		return -ENOMEM;
 	}
 
 	/*
@@ -552,7 +552,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 				dir);
 		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
-			return 0;
+			return -EIO;
 		sg_dma_len(sg) = sg->length;
 	}
 
@@ -473,7 +473,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	BUG_ON(direction == DMA_NONE);
 
 	if ((nelems == 0) || !tbl)
-		return 0;
+		return -EINVAL;
 
 	outs = s = segstart = &sglist[0];
 	outcount = 1;
@@ -575,7 +575,6 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	 */
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -593,13 +592,12 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
 			break;
 	}
-	return 0;
+	return -EIO;
 }
 
 
@@ -662,7 +662,7 @@ static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
 			   unsigned long attrs)
 {
 	BUG();
-	return 0;
+	return -EINVAL;
 }
 
 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
@@ -560,7 +560,8 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
-	if (vio_cmo_alloc(viodev, alloc_size))
+	ret = vio_cmo_alloc(viodev, alloc_size);
+	if (ret)
 		goto out_fail;
 	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
 			direction, attrs);
@@ -577,7 +578,7 @@ out_deallocate:
 	vio_cmo_dealloc(viodev, alloc_size);
 out_fail:
 	atomic_inc(&viodev->cmo.allocs_failed);
-	return 0;
+	return ret;
 }
 
 static void vio_dma_iommu_unmap_sg(struct device *dev,
@@ -487,18 +487,18 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned int max = dma_get_max_seg_size(dev);
 	unsigned int size = s->offset + s->length;
 	unsigned int offset = s->offset;
-	int count = 0, i;
+	int count = 0, i, ret;
 
 	for (i = 1; i < nr_elements; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) ||
 		    size + s->length > max) {
-			if (__s390_dma_map_sg(dev, start, size,
-					      &dma->dma_address, dir))
+			ret = __s390_dma_map_sg(dev, start, size,
+						&dma->dma_address, dir);
+			if (ret)
 				goto unmap;
 
 			dma->dma_address += offset;
@@ -511,7 +511,8 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		}
 		size += s->length;
 	}
-	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
+	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
+	if (ret)
 		goto unmap;
 
 	dma->dma_address += offset;
@@ -523,7 +524,7 @@ unmap:
 		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
 				     dir, attrs);
 
-	return 0;
+	return ret;
 }
 
 static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -448,7 +448,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu = dev->archdata.iommu;
 	strbuf = dev->archdata.stc;
 	if (nelems == 0 || !iommu)
-		return 0;
+		return -EINVAL;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
@@ -546,7 +546,6 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -572,7 +571,6 @@ iommu_map_failed:
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -580,7 +578,7 @@ iommu_map_failed:
 	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	return 0;
+	return -EINVAL;
 }
 
 /* If contexts are being used, they are the same in all of the mappings
@@ -486,7 +486,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	iommu = dev->archdata.iommu;
 	if (nelems == 0 || !iommu)
-		return 0;
+		return -EINVAL;
 	atu = iommu->atu;
 
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -594,7 +594,6 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -611,7 +610,6 @@ iommu_map_failed:
 			iommu_tbl_range_free(tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -619,7 +617,7 @@ iommu_map_failed:
 	}
 	local_irq_restore(flags);
 
-	return 0;
+	return -EINVAL;
 }
 
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
@ -256,7 +256,7 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
|
|||||||
sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
|
sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
|
||||||
sg->offset, sg->length, per_page_flush);
|
sg->offset, sg->length, per_page_flush);
|
||||||
if (sg->dma_address == DMA_MAPPING_ERROR)
|
if (sg->dma_address == DMA_MAPPING_ERROR)
|
||||||
return 0;
|
return -EIO;
|
||||||
sg->dma_length = sg->length;
|
sg->dma_length = sg->length;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -331,7 +331,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 	int i;
 
 	if (iommu_start == -1)
-		return -1;
+		return -ENOMEM;
 
 	for_each_sg(start, s, nelems, i) {
 		unsigned long pages, addr;
@@ -380,13 +380,13 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		       enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
-	int need = 0, nextneed, i, out, start;
+	int need = 0, nextneed, i, out, start, ret;
 	unsigned long pages = 0;
 	unsigned int seg_size;
 	unsigned int max_seg_size;
 
 	if (nents == 0)
-		return 0;
+		return -EINVAL;
 
 	out = 0;
 	start = 0;
@@ -414,8 +414,9 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (!iommu_merge || !nextneed || !need || s->offset ||
 		    (s->length + seg_size > max_seg_size) ||
 		    (ps->offset + ps->length) % PAGE_SIZE) {
-			if (dma_map_cont(dev, start_sg, i - start,
-					 sgmap, pages, need) < 0)
+			ret = dma_map_cont(dev, start_sg, i - start,
+					   sgmap, pages, need);
+			if (ret < 0)
 				goto error;
 			out++;
 
@@ -432,7 +433,8 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
 		ps = s;
 	}
-	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
+	ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need);
+	if (ret < 0)
 		goto error;
 	out++;
 	flush_gart();
@@ -456,9 +458,7 @@ error:
 		panic("dma_map_sg: overflow on %lu pages\n", pages);
 
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
-	for_each_sg(sg, s, nents, i)
-		s->dma_address = DMA_MAPPING_ERROR;
-	return 0;
+	return ret;
 }
 
 /* allocate and map a coherent mapping */
@@ -973,7 +973,7 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 
 out_unmap:
 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EIO;
 }
 
 /*
@@ -994,11 +994,13 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
+	ssize_t ret;
 	int i;
 
-	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_deferred_attach(dev, domain))
-		return 0;
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
+		ret = iommu_deferred_attach(dev, domain);
+		goto out;
+	}
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -1046,14 +1048,17 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova)
+	if (!iova) {
+		ret = -ENOMEM;
 		goto out_restore_sg;
+	}
 
 	/*
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
+	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+	if (ret < iova_len)
 		goto out_free_iova;
 
 	return __finalise_sg(dev, sg, nents, iova);
@@ -1062,7 +1067,10 @@ out_free_iova:
 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
-	return 0;
+out:
+	if (ret != -ENOMEM)
+		return -EINVAL;
+	return ret;
 }
 
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -2570,7 +2570,7 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
 
-static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			     struct scatterlist *sg, unsigned int nents, int prot,
 			     gfp_t gfp)
 {
@@ -2613,11 +2613,10 @@ out_err:
 	/* undo mappings already done */
 	iommu_unmap(domain, iova, mapped);
 
-	return 0;
-
+	return ret;
 }
 
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 		    struct scatterlist *sg, unsigned int nents, int prot)
 {
 	might_sleep();
@@ -2625,7 +2624,7 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_GPL(iommu_map_sg);
 
-size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
 		struct scatterlist *sg, unsigned int nents, int prot)
 {
 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
@@ -918,7 +918,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
 	if (!ioc)
-		return 0;
+		return -EINVAL;
 
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
@@ -947,7 +947,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	ioc = GET_IOC(dev);
 	if (!ioc)
-		return 0;
+		return -EINVAL;
 
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
@@ -509,7 +509,7 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
 out_unmap:
 	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
 	sg_dma_len(sgl) = 0;
-	return 0;
+	return -EIO;
 }
 
 static void
@@ -41,8 +41,9 @@ struct dma_map_ops {
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs);
 	/*
-	 * map_sg returns 0 on error and a value > 0 on success.
-	 * It should never return a value < 0.
+	 * map_sg should return a negative error code on error. See
+	 * dma_map_sgtable() for a list of appropriate error codes
+	 * and their meanings.
 	 */
 	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
 			enum dma_data_direction dir, unsigned long attrs);
@@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
-		dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
-		size_t size, int *ret);
-
 #else
 static inline int dma_declare_coherent_memory(struct device *dev,
 		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
@@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev,
 #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+		size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
 static inline void *dma_alloc_from_global_coherent(struct device *dev,
 		ssize_t size, dma_addr_t *dma_handle)
 {
@@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 {
 	return 0;
 }
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#endif /* CONFIG_DMA_GLOBAL_POOL */
 
 /*
  * This is the actual return value from the ->alloc_noncontiguous method.
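To make the rewritten map_sg comment above concrete, the following is a hedged sketch, not taken from this merge, of the shape a ->map_sg implementation is expected to have under the new rule. The my_map_page()/my_unmap_sg() helpers are hypothetical stand-ins for whatever an arch or IOMMU driver already uses; the only point illustrated is the return convention - the number of mapped entries on success, a negative errno on failure, never 0.

/* Hedged sketch: hypothetical ->map_sg under the new error convention. */
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>

static int my_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		     enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* my_map_page() is a made-up per-entry mapping helper */
		sg->dma_address = my_map_page(dev, sg_page(sg), sg->offset,
					      sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto unmap;	/* used to be "return 0" */
		sg_dma_len(sg) = sg->length;
	}
	return nents;

unmap:
	my_unmap_sg(dev, sgl, i, dir, attrs);	/* made-up cleanup helper */
	return -EIO;	/* or -ENOMEM/-EINVAL, whichever matches the failure */
}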
@@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		unsigned long attrs);
 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir,
 		unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+		enum dma_data_direction dir, unsigned long attrs);
 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
@@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	return 0;
 }
@@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
 		unsigned long attrs)
 {
 }
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	return -EOPNOTSUPP;
+}
 static inline dma_addr_t dma_map_resource(struct device *dev,
 		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
@@ -343,34 +351,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	return dma_sync_single_for_device(dev, addr + offset, size, dir);
 }
 
-/**
- * dma_map_sgtable - Map the given buffer for DMA
- * @dev: The device for which to perform the DMA operation
- * @sgt: The sg_table object describing the buffer
- * @dir: DMA direction
- * @attrs: Optional DMA attributes for the map operation
- *
- * Maps a buffer described by a scatterlist stored in the given sg_table
- * object for the @dir DMA operation by the @dev device. After success the
- * ownership for the buffer is transferred to the DMA domain. One has to
- * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
- * ownership of the buffer back to the CPU domain before touching the
- * buffer by the CPU.
- *
- * Returns 0 on success or -EINVAL on error during mapping the buffer.
- */
-static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	int nents;
-
-	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
-	if (nents <= 0)
-		return -EINVAL;
-	sgt->nents = nents;
-	return 0;
-}
-
 /**
  * dma_unmap_sgtable - Unmap the given buffer for DMA
  * @dev: The device for which to perform the DMA operation
@@ -414,9 +414,9 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
 			       unsigned long iova, size_t size,
 			       struct iommu_iotlb_gather *iotlb_gather);
-extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-			   struct scatterlist *sg,unsigned int nents, int prot);
-extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			    struct scatterlist *sg, unsigned int nents, int prot);
+extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
@@ -679,18 +679,18 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
 	return 0;
 }
 
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
 {
-	return 0;
+	return -ENODEV;
 }
 
-static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
 {
-	return 0;
+	return -ENODEV;
 }
 
 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
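Because iommu_map_sg() and iommu_map_sg_atomic() now return ssize_t, callers must treat negative values as an errno rather than only comparing against the requested length. A minimal, hypothetical caller sketch (not from this merge; domain/iova/sgt are assumed to be set up elsewhere, IOMMU_READ/IOMMU_WRITE are the real protection flags):

/* Hedged sketch: a made-up helper consuming the ssize_t-returning API. */
#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int my_map_into_domain(struct iommu_domain *domain, unsigned long iova,
			      struct sg_table *sgt, size_t len)
{
	ssize_t mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
				      IOMMU_READ | IOMMU_WRITE);

	if (mapped < 0)
		return mapped;		/* e.g. -ENODEV when no IOMMU is present */
	if (mapped < len)
		return -EINVAL;		/* short mapping treated as an error */
	return 0;
}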
|
@ -93,6 +93,10 @@ config DMA_COHERENT_POOL
|
|||||||
select GENERIC_ALLOCATOR
|
select GENERIC_ALLOCATOR
|
||||||
bool
|
bool
|
||||||
|
|
||||||
|
config DMA_GLOBAL_POOL
|
||||||
|
select DMA_DECLARE_COHERENT
|
||||||
|
bool
|
||||||
|
|
||||||
config DMA_REMAP
|
config DMA_REMAP
|
||||||
bool
|
bool
|
||||||
depends on MMU
|
depends on MMU
|
||||||
|
@ -20,8 +20,6 @@ struct dma_coherent_mem {
|
|||||||
bool use_dev_dma_pfn_offset;
|
bool use_dev_dma_pfn_offset;
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
|
|
||||||
|
|
||||||
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
|
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
|
||||||
{
|
{
|
||||||
if (dev && dev->dma_mem)
|
if (dev && dev->dma_mem)
|
||||||
@ -37,51 +35,44 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
|
|||||||
return mem->device_base;
|
return mem->device_base;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dma_init_coherent_memory(phys_addr_t phys_addr,
|
static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
|
||||||
dma_addr_t device_addr, size_t size,
|
dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
|
||||||
struct dma_coherent_mem **mem)
|
|
||||||
{
|
{
|
||||||
struct dma_coherent_mem *dma_mem = NULL;
|
struct dma_coherent_mem *dma_mem;
|
||||||
void *mem_base = NULL;
|
|
||||||
int pages = size >> PAGE_SHIFT;
|
int pages = size >> PAGE_SHIFT;
|
||||||
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
|
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
|
||||||
int ret;
|
void *mem_base;
|
||||||
|
|
||||||
if (!size) {
|
if (!size)
|
||||||
ret = -EINVAL;
|
return ERR_PTR(-EINVAL);
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
mem_base = memremap(phys_addr, size, MEMREMAP_WC);
|
mem_base = memremap(phys_addr, size, MEMREMAP_WC);
|
||||||
if (!mem_base) {
|
if (!mem_base)
|
||||||
ret = -EINVAL;
|
return ERR_PTR(-EINVAL);
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
|
dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
|
||||||
if (!dma_mem) {
|
if (!dma_mem)
|
||||||
ret = -ENOMEM;
|
goto out_unmap_membase;
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
|
dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
|
||||||
if (!dma_mem->bitmap) {
|
if (!dma_mem->bitmap)
|
||||||
ret = -ENOMEM;
|
goto out_free_dma_mem;
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
dma_mem->virt_base = mem_base;
|
dma_mem->virt_base = mem_base;
|
||||||
dma_mem->device_base = device_addr;
|
dma_mem->device_base = device_addr;
|
||||||
dma_mem->pfn_base = PFN_DOWN(phys_addr);
|
dma_mem->pfn_base = PFN_DOWN(phys_addr);
|
||||||
dma_mem->size = pages;
|
dma_mem->size = pages;
|
||||||
|
dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
|
||||||
spin_lock_init(&dma_mem->spinlock);
|
spin_lock_init(&dma_mem->spinlock);
|
||||||
|
|
||||||
*mem = dma_mem;
|
return dma_mem;
|
||||||
return 0;
|
|
||||||
|
|
||||||
out:
|
out_free_dma_mem:
|
||||||
kfree(dma_mem);
|
kfree(dma_mem);
|
||||||
if (mem_base)
|
out_unmap_membase:
|
||||||
memunmap(mem_base);
|
memunmap(mem_base);
|
||||||
return ret;
|
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
|
||||||
|
&phys_addr, size / SZ_1M);
|
||||||
|
return ERR_PTR(-ENOMEM);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
|
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
|
||||||
@ -130,9 +121,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
|
|||||||
struct dma_coherent_mem *mem;
|
struct dma_coherent_mem *mem;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
|
mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
|
||||||
if (ret)
|
if (IS_ERR(mem))
|
||||||
return ret;
|
return PTR_ERR(mem);
|
||||||
|
|
||||||
ret = dma_assign_coherent_memory(dev, mem);
|
ret = dma_assign_coherent_memory(dev, mem);
|
||||||
if (ret)
|
if (ret)
|
||||||
@ -198,16 +189,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
|
|||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
|
|
||||||
dma_addr_t *dma_handle)
|
|
||||||
{
|
|
||||||
if (!dma_coherent_default_memory)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
|
|
||||||
dma_handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
|
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
|
||||||
int order, void *vaddr)
|
int order, void *vaddr)
|
||||||
{
|
{
|
||||||
@ -243,15 +224,6 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
|
|||||||
return __dma_release_from_coherent(mem, order, vaddr);
|
return __dma_release_from_coherent(mem, order, vaddr);
|
||||||
}
|
}
|
||||||
|
|
||||||
int dma_release_from_global_coherent(int order, void *vaddr)
|
|
||||||
{
|
|
||||||
if (!dma_coherent_default_memory)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
return __dma_release_from_coherent(dma_coherent_default_memory, order,
|
|
||||||
vaddr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
|
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
|
||||||
struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
|
struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
|
||||||
{
|
{
|
||||||
@@ -297,6 +269,28 @@ int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
 }
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
+static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
+
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+				     dma_addr_t *dma_handle)
+{
+	if (!dma_coherent_default_memory)
+		return NULL;
+
+	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+					 dma_handle);
+}
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+					   vaddr);
+}
+
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 				  size_t size, int *ret)
 {
@@ -307,6 +301,19 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 					vaddr, size, ret);
 }
 
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
+{
+	struct dma_coherent_mem *mem;
+
+	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
+	dma_coherent_default_memory = mem;
+	pr_info("DMA: default coherent area is set\n");
+	return 0;
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
+
 /*
  * Support for reserved memory regions defined in device tree
  */
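
A rough illustration, not taken from this series: dma_init_global_coherent() above is the hook an architecture is expected to call once to register its single default coherent area. A hypothetical early-init user could look like the sketch below; the pool base and size are illustrative placeholders, and the declaration is assumed to come from <linux/dma-map-ops.h>.

#include <linux/dma-map-ops.h>
#include <linux/init.h>

/* Sketch only: values would be provided by platform/firmware code. */
static phys_addr_t example_pool_base __initdata;
static size_t example_pool_size __initdata;

static int __init example_global_pool_init(void)
{
	/* Registers the region as the default (global) coherent pool. */
	return dma_init_global_coherent(example_pool_base, example_pool_size);
}
core_initcall(example_global_pool_init);
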
@@ -315,25 +322,22 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 #include <linux/of_fdt.h>
 #include <linux/of_reserved_mem.h>
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
 static struct reserved_mem *dma_reserved_default_memory __initdata;
+#endif
 
 static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
-	struct dma_coherent_mem *mem = rmem->priv;
-	int ret;
-
-	if (!mem) {
-		ret = dma_init_coherent_memory(rmem->base, rmem->base,
-					       rmem->size, &mem);
-		if (ret) {
-			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
-			return ret;
-		}
-	}
-	mem->use_dev_dma_pfn_offset = true;
-	rmem->priv = mem;
-	dma_assign_coherent_memory(dev, mem);
+	if (!rmem->priv) {
+		struct dma_coherent_mem *mem;
+
+		mem = dma_init_coherent_memory(rmem->base, rmem->base,
+					       rmem->size, true);
+		if (IS_ERR(mem))
+			return PTR_ERR(mem);
+		rmem->priv = mem;
+	}
+	dma_assign_coherent_memory(dev, rmem->priv);
 	return 0;
 }
 
@@ -361,7 +365,9 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
 		pr_err("Reserved memory: regions without no-map are not yet supported\n");
 		return -EINVAL;
 	}
+#endif
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
 	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
 		WARN(dma_reserved_default_memory,
 		     "Reserved memory: region for default DMA coherent area is redefined\n");
@@ -375,31 +381,16 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
 	return 0;
 }
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
 static int __init dma_init_reserved_memory(void)
 {
-	const struct reserved_mem_ops *ops;
-	int ret;
-
 	if (!dma_reserved_default_memory)
 		return -ENOMEM;
-
-	ops = dma_reserved_default_memory->ops;
-
-	/*
-	 * We rely on rmem_dma_device_init() does not propagate error of
-	 * dma_assign_coherent_memory() for "NULL" device.
-	 */
-	ret = ops->device_init(dma_reserved_default_memory, NULL);
-
-	if (!ret) {
-		dma_coherent_default_memory = dma_reserved_default_memory->priv;
-		pr_info("DMA: default coherent area is set\n");
-	}
-
-	return ret;
+	return dma_init_global_coherent(dma_reserved_default_memory->base,
+					dma_reserved_default_memory->size);
 }
 
 core_initcall(dma_init_reserved_memory);
+#endif /* CONFIG_DMA_GLOBAL_POOL */
 
 RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
 #endif
@@ -792,7 +792,7 @@ static int dump_show(struct seq_file *seq, void *v)
 }
 DEFINE_SHOW_ATTRIBUTE(dump);
 
-static void dma_debug_fs_init(void)
+static int __init dma_debug_fs_init(void)
 {
 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
 
@@ -805,7 +805,10 @@ static void dma_debug_fs_init(void)
 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
+
+	return 0;
 }
+core_initcall_sync(dma_debug_fs_init);
 
 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
 {
@@ -890,8 +893,6 @@ static int dma_debug_init(void)
 		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
-	dma_debug_fs_init();
-
 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
 	for (i = 0; i < nr_pages; ++i)
 		dma_debug_create_entries(GFP_KERNEL);
@@ -1064,20 +1065,10 @@ static void check_for_stack(struct device *dev,
 	}
 }
 
-static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
-{
-	unsigned long a1 = (unsigned long)addr;
-	unsigned long b1 = a1 + len;
-	unsigned long a2 = (unsigned long)start;
-	unsigned long b2 = (unsigned long)end;
-
-	return !(b1 <= a2 || a1 >= b2);
-}
-
 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, len, _stext, _etext) ||
-	    overlap(addr, len, __start_rodata, __end_rodata))
+	if (memory_intersects(_stext, _etext, addr, len) ||
+	    memory_intersects(__start_rodata, __end_rodata, addr, len))
 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }
 
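
The hunk above drops the local overlap() helper in favor of the kernel's memory_intersects(). As a rough sketch of the predicate the new check relies on (an illustration, not the kernel's exact implementation): the ranges [virt, virt + size) and [begin, end) intersect when each one starts before the other ends.

#include <stdbool.h>
#include <stddef.h>

/* True if [virt, virt + size) shares at least one byte with [begin, end). */
static bool ranges_intersect(const void *begin, const void *end,
			     const void *virt, size_t size)
{
	const char *vstart = virt;
	const char *vend = vstart + size;

	return vstart < (const char *)end && vend > (const char *)begin;
}
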
@@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev))
+		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
@@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev)) {
+		if (!dma_release_from_global_coherent(page_order, cpu_addr))
+			WARN_ON_ONCE(1);
+		return;
+	}
+
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -411,7 +424,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 
 out_unmap:
 	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EIO;
 }
 
 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
@@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
 
 	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
 		return -ENXIO;
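
From a driver's point of view the global pool fallback shown above is transparent: a hypothetical consumer keeps using the ordinary coherent API, and on a non-cache-coherent device with CONFIG_DMA_GLOBAL_POOL the allocation is served from the global pool. A bare-bones sketch, with illustrative names:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate and free a small coherent buffer. */
static void *example_alloc_buf(struct device *dev, size_t len, dma_addr_t *dma)
{
	/* May be backed by the global coherent pool on !coherent devices. */
	return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
}

static void example_free_buf(struct device *dev, size_t len, void *buf,
			     dma_addr_t dma)
{
	dma_free_coherent(dev, len, buf, dma);
}
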
@@ -22,7 +22,7 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 				int nelems, enum dma_data_direction dir,
 				unsigned long attrs)
 {
-	return 0;
+	return -EINVAL;
 }
 
 static int dma_dummy_supported(struct device *hwdev, u64 mask)
@@ -177,12 +177,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 }
 EXPORT_SYMBOL(dma_unmap_page_attrs);
 
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs)
+static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+	 int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	int ents;
@@ -197,13 +193,81 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
 	else
 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
-	BUG_ON(ents < 0);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	if (ents > 0)
+		debug_dma_map_sg(dev, sg, nents, ents, dir);
+	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
+			      ents != -EIO))
+		return -EIO;
 
 	return ents;
 }
+
+/**
+ * dma_map_sg_attrs - Map the given buffer for DMA
+ * @dev:	The device for which to perform the DMA operation
+ * @sg:		The sg_table object describing the buffer
+ * @dir:	DMA direction
+ * @attrs:	Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist passed in the sg argument with
+ * nents segments for the @dir DMA operation by the @dev device.
+ *
+ * Returns the number of mapped entries (which can be less than nents)
+ * on success. Zero is returned for any error.
+ *
+ * dma_unmap_sg_attrs() should be used to unmap the buffer with the
+ * original sg and original nents (not the value returned by this function).
+ */
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	int ret;
+
+	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
+	if (ret < 0)
+		return 0;
+	return ret;
+}
 EXPORT_SYMBOL(dma_map_sg_attrs);
+
+/**
+ * dma_map_sgtable - Map the given buffer for DMA
+ * @dev:	The device for which to perform the DMA operation
+ * @sgt:	The sg_table object describing the buffer
+ * @dir:	DMA direction
+ * @attrs:	Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After success, the
+ * ownership for the buffer is transferred to the DMA domain. One has to
+ * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
+ * ownership of the buffer back to the CPU domain before touching the
+ * buffer by the CPU.
+ *
+ * Returns 0 on success or a negative error code on error. The following
+ * error codes are supported with the given meaning:
+ *
+ *   -EINVAL - An invalid argument, unaligned access or other error
+ *	       in usage. Will not succeed if retried.
+ *   -ENOMEM - Insufficient resources (like memory or IOVA space) to
+ *	       complete the mapping. Should succeed if retried later.
+ *   -EIO    - Legacy error code with an unknown meaning, e.g. this is
+ *	       returned if a lower level call returned DMA_MAPPING_ERROR.
+ */
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+		    enum dma_data_direction dir, unsigned long attrs)
+{
+	int nents;
+
+	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+	if (nents < 0)
+		return nents;
+	sgt->nents = nents;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_map_sgtable);
 
 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir,
 			unsigned long attrs)
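
Under the conventions documented above, dma_map_sg() still reports failure as 0 (now with an unsigned return type), while dma_map_sgtable() returns 0 or a negative errno that callers can propagate directly. A hypothetical caller, sketched with illustrative names and error handling trimmed to the essentials:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map an sg_table for device writes, then undo it. */
static int example_map_and_unmap(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* 0 on success, or -EINVAL / -ENOMEM / -EIO on failure. */
	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;

	/* ... program the device using sgt->sgl / sgt->nents ... */

	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	return 0;
}

/* Legacy-style caller: dma_map_sg() returns 0 on any error. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	unsigned int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -EIO;

	/* Unmap with the original nents, not the returned count. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
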