Commit 58bca4a8 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull DMA mapping branch from Marek Szyprowski:
 "Short summary for the whole series:

  A few limitations have been identified in the current dma-mapping
  design and its implementations for various architectures.  More than
  one function exists for allocating and freeing buffers; currently
  three are in use: dma_{alloc,free}_coherent,
  dma_{alloc,free}_writecombine and dma_{alloc,free}_noncoherent.

  For most systems these calls are almost equivalent and can be
  interchanged.  For others, especially the truly non-coherent ones
  (like ARM), the difference is easily noticeable in overall driver
  performance.  Sadly, not all architectures provide implementations
  for all of them, so drivers may need to be adapted and cannot easily
  be shared between different architectures.  The provided patches
  unify all these functions and hide the differences behind the
  already existing DMA attributes concept.  The thread with more
  references is available here:

    http://www.spinics.net/lists/linux-sh/msg09777.html

  These patches are also a prerequisite for unifying the DMA-mapping
  implementation on the ARM architecture with the common one provided
  by the dma_map_ops structure, and for extending it with IOMMU
  support.  More information is available in the following thread:

    http://thread.gmane.org/gmane.linux.kernel.cross-arch/12819

  More work on the dma-mapping framework is planned, especially in the
  area of buffer sharing and managing shared mappings (together with
  the recently introduced dma_buf interface: commit d15bd7ee
  "dma-buf: Introduce dma buffer sharing mechanism").

  The patches in the current set introduce new alloc/free methods
  (with support for memory attributes) in the dma_map_ops structure,
  which will later replace the dma_alloc_coherent and
  dma_alloc_writecombine functions."

People finally started piping up with support for merging this, so I'm
merging it as the last of the pending stuff from the merge window.
Looks like pohmelfs is going to wait for 3.5 and more external support
for merging.

* 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  common: DMA-mapping: add NON-CONSISTENT attribute
  common: DMA-mapping: add WRITE_COMBINE attribute
  common: dma-mapping: introduce mmap method
  common: dma-mapping: remove old alloc_coherent and free_coherent methods
  Hexagon: adapt for dma_map_ops changes
  Unicore32: adapt for dma_map_ops changes
  Microblaze: adapt for dma_map_ops changes
  SH: adapt for dma_map_ops changes
  Alpha: adapt for dma_map_ops changes
  SPARC: adapt for dma_map_ops changes
  PowerPC: adapt for dma_map_ops changes
  MIPS: adapt for dma_map_ops changes
  X86 & IA64: adapt for dma_map_ops changes
  common: dma-mapping: introduce generic alloc() and free() methods
parents 64ebe987 64d70fe5
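
For orientation, a condensed sketch of the interface this series introduces,
assembled from the generic dma-mapping hunks below (unrelated dma_map_ops
members are elided); this is a sketch, not a verbatim copy of any one header:

	/* Each architecture's dma_map_ops now exposes attribute-aware hooks. */
	struct dma_map_ops {
		void *(*alloc)(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       struct dma_attrs *attrs);
		void (*free)(struct device *dev, size_t size,
			     void *vaddr, dma_addr_t dma_handle,
			     struct dma_attrs *attrs);
		/* ... map_page, map_sg, sync and error hooks unchanged ... */
	};

	/* The old entry points become thin wrappers passing NULL attributes. */
	#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
	#define dma_free_coherent(d, s, c, h)	dma_free_attrs(d, s, c, h, NULL)

	static inline void *dma_alloc_attrs(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t gfp,
					    struct dma_attrs *attrs)
	{
		return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
	}

	static inline void dma_free_attrs(struct device *dev, size_t size,
					  void *vaddr, dma_addr_t dma_handle,
					  struct dma_attrs *attrs)
	{
		get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
	}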
......@@ -31,3 +31,21 @@ may be weakly ordered, that is that reads and writes may pass each other.
Since it is optional for platforms to implement DMA_ATTR_WEAK_ORDERING,
those that do not will simply ignore the attribute and exhibit default
behavior.

DMA_ATTR_WRITE_COMBINE
----------------------

DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
buffered to improve performance.

Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE,
those that do not will simply ignore the attribute and exhibit default
behavior.

DMA_ATTR_NON_CONSISTENT
-----------------------

DMA_ATTR_NON_CONSISTENT lets the platform choose to return either
consistent or non-consistent memory as it sees fit.  By using this API,
you are guaranteeing to the platform that you have all the correct and
necessary sync points for this memory in the driver.
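
As an illustration (not part of this patch), a driver could request a
write-combined buffer by passing the new attribute through the existing
struct dma_attrs interface from <linux/dma-attrs.h>; "dev" and "size" are
assumed to come from the calling driver:

	#include <linux/gfp.h>
	#include <linux/dma-attrs.h>
	#include <linux/dma-mapping.h>

	static void *alloc_wc_buffer(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* Platforms without write-combine support ignore the attribute. */
		return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
	}

The matching release goes through dma_free_attrs() with the same attributes;
drivers that do not care about attributes keep calling dma_alloc_coherent(),
which now expands to dma_alloc_attrs(..., NULL).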
......@@ -12,16 +12,22 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
......
......@@ -108,7 +108,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
}
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
......@@ -123,7 +124,8 @@ static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
}
static void alpha_noop_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
void *cpu_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
......@@ -174,8 +176,8 @@ static int alpha_noop_set_mask(struct device *dev, u64 mask)
}
struct dma_map_ops alpha_noop_ops = {
.alloc_coherent = alpha_noop_alloc_coherent,
.free_coherent = alpha_noop_free_coherent,
.alloc = alpha_noop_alloc_coherent,
.free = alpha_noop_free_coherent,
.map_page = alpha_noop_map_page,
.map_sg = alpha_noop_map_sg,
.mapping_error = alpha_noop_mapping_error,
......
......@@ -434,7 +434,8 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
else DMA_ADDRP is undefined. */
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp)
dma_addr_t *dma_addrp, gfp_t gfp,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
void *cpu_addr;
......@@ -478,7 +479,8 @@ try_again:
DMA_ADDR past this call are illegal. */
static void alpha_pci_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
void *cpu_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
......@@ -952,8 +954,8 @@ static int alpha_pci_set_mask(struct device *dev, u64 mask)
}
struct dma_map_ops alpha_pci_ops = {
.alloc_coherent = alpha_pci_alloc_coherent,
.free_coherent = alpha_pci_free_coherent,
.alloc = alpha_pci_alloc_coherent,
.free = alpha_pci_free_coherent,
.map_page = alpha_pci_map_page,
.unmap_page = alpha_pci_unmap_page,
.map_sg = alpha_pci_map_sg,
......
......@@ -71,29 +71,35 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return (dma_addr == bad_dma_address);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
ret = ops->alloc_coherent(dev, size, dma_handle, flag);
ret = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
......
......@@ -54,7 +54,8 @@ static struct gen_pool *coherent_pool;
/* Allocates from a pool of uncached memory that was reserved at boot time */
void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag)
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs)
{
void *ret;
......@@ -81,7 +82,7 @@ void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr)
dma_addr_t dma_addr, struct dma_attrs *attrs)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
......@@ -202,8 +203,8 @@ static void hexagon_sync_single_for_device(struct device *dev,
}
struct dma_map_ops hexagon_dma_ops = {
.alloc_coherent = hexagon_dma_alloc_coherent,
.free_coherent = hexagon_free_coherent,
.alloc = hexagon_dma_alloc_coherent,
.free = hexagon_free_coherent,
.map_sg = hexagon_map_sg,
.map_page = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
......
......@@ -1129,7 +1129,8 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
* See Documentation/DMA-API-HOWTO.txt
*/
static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flags, struct dma_attrs *attrs)
{
struct ioc *ioc;
void *addr;
......@@ -1191,8 +1192,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
*
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
free_pages((unsigned long) vaddr, get_order(size));
......@@ -2212,8 +2213,8 @@ sba_page_override(char *str)
__setup("sbapagesize=",sba_page_override);
struct dma_map_ops sba_dma_ops = {
.alloc_coherent = sba_alloc_coherent,
.free_coherent = sba_free_coherent,
.alloc = sba_alloc_coherent,
.free = sba_free_coherent,
.map_page = sba_map_page,
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
......
......@@ -23,23 +23,29 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *daddr, gfp_t gfp)
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *daddr, gfp_t gfp,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
void *caddr;
caddr = ops->alloc_coherent(dev, size, daddr, gfp);
caddr = ops->alloc(dev, size, daddr, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *daddr, caddr);
return caddr;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *caddr, dma_addr_t daddr)
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *caddr, dma_addr_t daddr,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
debug_dma_free_coherent(dev, size, caddr, daddr);
ops->free_coherent(dev, size, caddr, daddr);
ops->free(dev, size, caddr, daddr, attrs);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
......
......@@ -15,16 +15,24 @@ int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
gfp |= GFP_DMA;
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
struct dma_map_ops swiotlb_dma_ops = {
.alloc_coherent = ia64_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
.alloc = ia64_swiotlb_alloc_coherent,
.free = ia64_swiotlb_free_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
......
......@@ -76,7 +76,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
* more information.
*/
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t flags)
dma_addr_t * dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *cpuaddr;
unsigned long phys_addr;
......@@ -137,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* any associated IOMMU mappings.
*/
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
......@@ -466,8 +467,8 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
}
static struct dma_map_ops sn_dma_ops = {
.alloc_coherent = sn_dma_alloc_coherent,
.free_coherent = sn_dma_free_coherent,
.alloc = sn_dma_alloc_coherent,
.free = sn_dma_free_coherent,
.map_page = sn_dma_map_page,
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
......
......@@ -123,28 +123,34 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
BUG_ON(!ops);
memory = ops->alloc_coherent(dev, size, dma_handle, flag);
memory = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
......
......@@ -33,7 +33,8 @@ static unsigned long get_dma_direct_offset(struct device *dev)
#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
......@@ -57,7 +58,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
......@@ -176,8 +178,8 @@ dma_direct_sync_sg_for_device(struct device *dev,
}
struct dma_map_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
......
......@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
}
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
......@@ -192,7 +192,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void octeon_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order = get_order(size);
......@@ -240,8 +240,8 @@ EXPORT_SYMBOL(dma_to_phys);
static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
.dma_map_ops = {
.alloc_coherent = octeon_dma_alloc_coherent,
.free_coherent = octeon_dma_free_coherent,
.alloc = octeon_dma_alloc_coherent,
.free = octeon_dma_free_coherent,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
......@@ -325,8 +325,8 @@ void __init plat_swiotlb_setup(void)
#ifdef CONFIG_PCI
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
.dma_map_ops = {
.alloc_coherent = octeon_dma_alloc_coherent,
.free_coherent = octeon_dma_free_coherent,
.alloc = octeon_dma_alloc_coherent,
.free = octeon_dma_free_coherent,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
......
......@@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask)
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
ops->free_coherent(dev, size, vaddr, dma_handle);
ops->free(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
......
......@@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
......@@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dma_free_noncoherent);
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
int order = get_order(size);
......@@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
EXPORT_SYMBOL(dma_cache_sync);
static struct dma_map_ops mips_default_dma_map_ops = {
.alloc_coherent = mips_dma_alloc_coherent,
.free_coherent = mips_dma_free_coherent,
.alloc = mips_dma_alloc_coherent,
.free = mips_dma_free_coherent,
.map_page = mips_dma_map_page,
.unmap_page = mips_dma_unmap_page,
.map_sg = mips_dma_map_sg,
......
......@@ -22,9 +22,11 @@
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
......@@ -130,23 +132,29 @@ static inline int dma_supported(struct device *dev, u64 mask)
extern int dma_set_mask(struct device *dev, u64 dma_mask);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!dma_ops);
cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
......@@ -154,7 +162,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
......
......@@ -17,7 +17,8 @@
* to the dma address (mapping) of the first page.
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
......@@ -25,7 +26,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
}
static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
......@@ -105,8 +107,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
}
struct dma_map_ops dma_iommu_ops = {
.alloc_coherent = dma_iommu_alloc_coherent,
.free_coherent = dma_iommu_free_coherent,
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
......
......@@ -47,8 +47,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* for everything else.
*/
struct dma_map_ops swiotlb_dma_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,