#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
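
/*
 * Illustrative sketch (editor's addition, compiled out): how the generic
 * DMA API typically dispatches through this ops table. The get_dma_ops()
 * helper name is an assumption; it is provided by <asm/dma-mapping.h> on
 * architectures that use dma_map_ops.
 */
#if 0
static inline dma_addr_t example_dma_map_page(struct device *dev,
			struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	/* callers must test the result with dma_mapping_error() */
	return addr;
}
#endif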

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
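
/*
 * Editor's note (illustrative): DMA_BIT_MASK(32) is 0x00000000ffffffffULL
 * and DMA_BIT_MASK(64) is ~0ULL; the explicit n == 64 case avoids the
 * undefined behaviour of shifting a 64-bit value by 64 bits.
 */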

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can always be set
 * to the same value as, or a smaller one than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
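
/*
 * Illustrative sketch (editor's addition, compiled out): the common
 * probe-time pattern of requesting 64-bit addressing and falling back
 * to 32 bits. example_setup_dma() is a hypothetical helper.
 */
#if 0
static int example_setup_dma(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	/* fall back for hardware or buses limited to 32-bit addresses */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
#endif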

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
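
/*
 * Editor's note (illustrative): dma_coerce_mask_and_coherent() is
 * intended for buses whose setup code never allocates dev->dma_mask
 * (e.g. simple memory-mapped platform devices); pointing dma_mask at
 * coherent_dma_mask leaves the device with a single shared mask.
 */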

extern u64 dma_get_required_mask(struct device *dev);

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	return dev->dma_parms ?
		dev->dma_parms->segment_boundary_mask : 0xffffffff;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
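
/*
 * Illustrative sketch (editor's addition, compiled out): a bus driver
 * limiting DMA segments to 64 KiB, none of which may cross a 4 KiB
 * boundary. Assumes dev->dma_parms was allocated by the bus code;
 * example_constrain_segments() is a hypothetical helper.
 */
#if 0
static int example_constrain_segments(struct device *dev)
{
	int rc = dma_set_max_seg_size(dev, 0x10000);	/* 64 KiB */

	if (rc)
		return rc;
	return dma_set_seg_boundary(dev, 0xfff);	/* 4 KiB boundary */
}
#endif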

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}
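
/*
 * Illustrative sketch (editor's addition, compiled out): allocating a
 * zeroed, coherent descriptor ring and handing its bus address to
 * hypothetical hardware. Size and names are the editor's assumptions.
 */
#if 0
static void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	void *ring = dma_zalloc_coherent(dev, 4096, ring_dma, GFP_KERNEL);

	if (!ring)
		return NULL;
	/* program *ring_dma into the device; free with dma_free_coherent() */
	return ring;
}
#endif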

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif
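
/*
 * Illustrative sketch (editor's addition, compiled out): declaring a
 * dedicated on-chip SRAM region for a device's coherent allocations.
 * Addresses are hypothetical, and the assumed (historical) convention
 * is that the call returns the granted DMA_MEMORY_* flags, 0 on failure.
 */
#if 0
static int example_declare_sram(struct device *dev)
{
	int rc = dma_declare_coherent_memory(dev, 0x10000000, 0x10000000,
					     0x4000,
					     DMA_MEMORY_MAP |
					     DMA_MEMORY_EXCLUSIVE);

	return (rc & DMA_MEMORY_MAP) ? 0 : -ENODEV;
}
#endif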

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
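
/*
 * Illustrative sketch (editor's addition, compiled out): the managed
 * variant ties the allocation's lifetime to the device via devres, so
 * probe error paths and the remove path need no explicit free. Names
 * are hypothetical.
 */
#if 0
static int example_probe(struct device *dev)
{
	dma_addr_t desc_dma;
	void *desc = dmam_alloc_coherent(dev, 1024, &desc_dma, GFP_KERNEL);

	if (!desc)
		return -ENOMEM;
	/* freed automatically on driver detach */
	return 0;
}
#endif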

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
static inline int dmam_declare_coherent_memory(struct device *dev,
				dma_addr_t bus_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

#ifndef CONFIG_HAVE_DMA_ATTRS
struct dma_attrs;

#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
	dma_map_single(dev, cpu_addr, size, dir)

#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
	dma_unmap_single(dev, dma_addr, size, dir)

#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_map_sg(dev, sgl, nents, dir)

#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_unmap_sg(dev, sgl, nents, dir)

#endif /* CONFIG_HAVE_DMA_ATTRS */

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
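
/*
 * Illustrative sketch (editor's addition, compiled out): embedding unmap
 * state in a driver structure so that it occupies no space on platforms
 * where CONFIG_NEED_DMA_MAP_STATE is unset. Names are hypothetical.
 */
#if 0
struct example_tx_buf {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void example_unmap_tx(struct device *dev, struct example_tx_buf *tb)
{
	dma_unmap_single(dev, dma_unmap_addr(tb, mapping),
			 dma_unmap_len(tb, len), DMA_TO_DEVICE);
}
#endif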

#endif /* _LINUX_DMA_MAPPING_H */