Commit d0cd8481 authored by Linus Torvalds

Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions, this is 3.17-rc1/merge-window material.

  These patches have been languishing in my tree for a long while.  The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer.  That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next.  It has developed simple
  conflicts against mainline and net-next (for-3.17).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

   1/ Step down as dmaengine maintainer, see commit 08223d80
      "dmaengine maintainer update"

   2/ Removal of net_dma, as it has been marked 'broken' since 3.13
      (commit 77873803 "net_dma: mark broken"), without reports of
      performance regression.

   3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
parents bdf428fe 3f334078
What: tcp_dma_copybreak sysctl
Date: Removed in kernel v3.13
Contact: Dan Williams <dan.j.williams@intel.com>
Description:
Formerly the lower limit, in bytes, of the size of socket reads
that will be offloaded to a DMA copy engine. Removed due to
coherency issues of the cpu potentially touching the buffers
while dma is in flight.
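
For context, a simplified sketch of how the (now removed) TCP receive path consulted this threshold before offloading the copy-to-user to a DMA engine. This is a reconstruction from memory, not the verbatim deleted code; the exact conditions in tcp_recvmsg() were more involved:

#ifdef CONFIG_NET_DMA
	/* Offload only reads of at least tcp_dma_copybreak bytes, and only
	 * when a capable copy engine is actually present in the system. */
	if (len > sysctl_tcp_dma_copybreak && !(flags & MSG_PEEK) &&
	    net_dma_find_channel())
		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
#endif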
......@@ -580,12 +580,6 @@ tcp_workaround_signed_windows - BOOLEAN
not receive a window scaling option from them.
Default: 0
tcp_dma_copybreak - INTEGER
Lower limit, in bytes, of the size of socket reads that will be
offloaded to a DMA copy engine, if one is present in the system
and CONFIG_NET_DMA is enabled.
Default: 4096
tcp_thin_linear_timeouts - BOOLEAN
Enable dynamic triggering of linear timeouts for thin streams.
If set, a check is performed upon retransmission by timeout to
......
......@@ -1025,24 +1025,20 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IOP33X ARM ARCHITECTURE
M: Dan Williams <dan.j.williams@intel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
S: Orphan
ARM/INTEL IOP13XX ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IQ81342EX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
......@@ -1067,7 +1063,6 @@ F: drivers/pcmcia/pxa2xx_stargate2.c
ARM/INTEL XSC3 (MANZANO) ARM CORE
M: Lennert Buytenhek <kernel@wantstofly.org>
M: Dan Williams <dan.j.williams@intel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
......@@ -1562,9 +1557,9 @@ F: drivers/platform/x86/asus*.c
F: drivers/platform/x86/eeepc*.c
ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
M: Dan Williams <dan.j.williams@intel.com>
R: Dan Williams <dan.j.williams@intel.com>
W: http://sourceforge.net/projects/xscaleiop
S: Maintained
S: Odd fixes
F: Documentation/crypto/async-tx-api.txt
F: crypto/async_tx/
F: drivers/dma/
......@@ -2995,13 +2990,11 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
L: dmaengine@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Supported
S: Maintained
F: drivers/dma/
F: include/linux/dma*
T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
DME1737 HARDWARE MONITOR DRIVER
......@@ -4754,8 +4747,8 @@ F: arch/x86/kernel/cpu/microcode/core*
F: arch/x86/kernel/cpu/microcode/intel*
INTEL I/OAT DMA DRIVER
M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
R: Dan Williams <dan.j.williams@intel.com>
L: dmaengine@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Supported
......@@ -4770,7 +4763,7 @@ F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER
M: Dan Williams <dan.j.williams@intel.com>
R: Dan Williams <dan.j.williams@intel.com>
S: Odd fixes
F: drivers/dma/iop-adma.c
......
......@@ -78,8 +78,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
xor_src_cnt, unmap->len,
dma_flags);
src_list[0] = tmp;
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
......@@ -92,6 +90,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
xor_src_cnt, unmap->len,
dma_flags);
}
src_list[0] = tmp;
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
......
......@@ -427,18 +427,6 @@ config DMA_OF
comment "DMA Clients"
depends on DMA_ENGINE
config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
say N.
config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
depends on DMA_ENGINE
......
......@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
obj-$(CONFIG_DMA_OF) += of-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
......
......@@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
/**
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
* @chan: DMA channel to offload copy to
* @dest_pg: destination page
* @dest_off: offset in page to copy to
* @src_pg: source page
* @src_off: offset in page to copy from
* @len: length
*
* Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
* address according to the DMA mapping API rules for streaming mappings.
* Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
* (kernel memory or locked user space pages).
*/
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
unsigned int dest_off, struct page *src_pg, unsigned int src_off,
size_t len)
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
unsigned long flags;
unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
unmap->to_cnt = 1;
unmap->from_cnt = 1;
unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
DMA_TO_DEVICE);
unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
DMA_FROM_DEVICE);
unmap->len = len;
flags = DMA_CTRL_ACK;
tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
len, flags);
if (!tx) {
dmaengine_unmap_put(unmap);
return -ENOMEM;
}
dma_set_unmap(tx, unmap);
cookie = tx->tx_submit(tx);
dmaengine_unmap_put(unmap);
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
__this_cpu_inc(chan->local->memcpy_count);
preempt_enable();
return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
/**
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
* @chan: DMA channel to offload copy to
* @dest: destination address (virtual)
* @src: source address (virtual)
* @len: length
*
* Both @dest and @src must be mappable to a bus address according to the
* DMA mapping API rules for streaming mappings.
* Both @dest and @src must stay memory resident (kernel memory or locked
* user space pages).
*/
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
void *src, size_t len)
{
return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
(unsigned long) dest & ~PAGE_MASK,
virt_to_page(src),
(unsigned long) src & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
/**
* dma_async_memcpy_buf_to_pg - offloaded copy from address to page
* @chan: DMA channel to offload copy to
* @page: destination page
* @offset: offset in page to copy to
* @kdata: source address (virtual)
* @len: length
*
* Both @page/@offset and @kdata must be mappable to a bus address according
* to the DMA mapping API rules for streaming mappings.
* Both @page/@offset and @kdata must stay memory resident (kernel memory or
* locked user space pages)
*/
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
unsigned int offset, void *kdata, size_t len)
{
return dma_async_memcpy_pg_to_pg(chan, page, offset,
virt_to_page(kdata),
(unsigned long) kdata & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
......
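
With the dma_async_memcpy_* convenience wrappers gone, the same operation can still be open-coded against the core prep/submit primitives that remain. A minimal sketch, assuming a memcpy-capable channel has already been obtained and src_buf/dst_buf/len are placeholder kernel buffers (error handling trimmed):

	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t src, dst;
	dma_cookie_t cookie;

	src = dma_map_single(dev->dev, src_buf, len, DMA_TO_DEVICE);
	dst = dma_map_single(dev->dev, dst_buf, len, DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (tx) {
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		dma_sync_wait(chan, cookie);	/* block until the copy completes */
	}

	dma_unmap_single(dev->dev, dst, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev->dev, src, len, DMA_TO_DEVICE);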
......@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
runtime = ktime_us_delta(ktime_get(), ktime);
ret = 0;
err_dstbuf:
for (i = 0; thread->dsts[i]; i++)
kfree(thread->dsts[i]);
err_dstbuf:
kfree(thread->dsts);
err_dsts:
err_srcbuf:
for (i = 0; thread->srcs[i]; i++)
kfree(thread->srcs[i]);
err_srcbuf:
kfree(thread->srcs);
err_srcs:
kfree(pq_coefs);
......
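
The dmatest change above is the usual goto-unwind ordering fix: each error label must release exactly what was successfully allocated before the failure, and nothing more. A minimal sketch of the intended pattern, with hypothetical names rather than the dmatest code itself:

/* Hypothetical helper; illustrates the unwind ordering only. */
static int alloc_test_bufs(void ***bufs_out, int n, size_t size)
{
	void **bufs;
	int i;

	bufs = kcalloc(n + 1, sizeof(*bufs), GFP_KERNEL);	/* NULL-terminated */
	if (!bufs)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		bufs[i] = kmalloc(size, GFP_KERNEL);
		if (!bufs[i])
			goto err_buf;
	}
	*bufs_out = bufs;
	return 0;

err_buf:
	while (--i >= 0)	/* free only what was allocated ... */
		kfree(bufs[i]);
	kfree(bufs);		/* ... and only then the tracking array */
	return -ENOMEM;
}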
......@@ -35,6 +35,7 @@
#include "dma.h"
#include "registers.h"
#include "dma_v2.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
......@@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
u16 id;
/* This implementation only supports PCI-Express */
if (dev->bus != &pci_bus_type)
if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
id = dcaid_from_pcidev(pdev);
......@@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
int i;
/* This implementation only supports PCI-Express */
if (dev->bus != &pci_bus_type)
if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
......@@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
u16 global_req_table;
/* This implementation only supports PCI-Express */
if (dev->bus != &pci_bus_type)
if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
id = dcaid_from_pcidev(pdev);
......@@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
u16 global_req_table;
/* This implementation only supports PCI-Express */
if (dev->bus != &pci_bus_type)
if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
......@@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
u16 global_req_table;
/* This implementation only supports PCI-Express */
if (dev->bus != &pci_bus_type)
if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
id = dcaid_from_pcidev(pdev);
......@@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
u16 global_req_table;
/* This implementation only supports PCI-Express */
if (dev->bus != &pci_bus_type)
if (!dev_is_pci(dev))
return -ENODEV;
pdev = to_pci_dev(dev);
......
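
The dca.c hunks above are behaviorally neutral: dev_is_pci() is a small helper in include/linux/pci.h that performs the same bus comparison the driver used to open-code. Roughly (paraphrased):

static inline bool dev_is_pci(struct device *dev)
{
	return dev->bus == &pci_bus_type;
}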
......@@ -947,7 +947,7 @@ msix:
for (i = 0; i < msixcnt; i++)
device->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
if (err)
goto msi;
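
pci_enable_msix() can also return a positive count, meaning "only this many vectors are available", which callers such as this one ended up treating as a failure anyway. pci_enable_msix_exact() folds that all-or-nothing policy into the API; it is approximately the following wrapper (paraphrased from include/linux/pci.h):

static inline int pci_enable_msix_exact(struct pci_dev *dev,
					struct msix_entry *entries, int nvec)
{
	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);

	return rc < 0 ? rc : 0;	/* 0 on success, negative errno otherwise */
}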
......@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(4096);
err = ioat_register(device);
if (err)
return err;
......
......@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
#define dump_desc_dbg(c, d) \
({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
{
#ifdef CONFIG_NET_DMA
sysctl_tcp_dma_copybreak = copybreak;
#endif
}
static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
......
......@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
* called under bh_disabled so we need to trigger the timer
* event directly
*/
if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
if (time_is_before_jiffies(chan->timer.expires)
&& timer_pending(&chan->timer)) {
struct ioatdma_device *device = chan->device;
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
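
An open-coded "jiffies > chan->timer.expires" comparison is not safe across a jiffies wrap-around; the time_*() helpers compare via a signed difference so that wrapping is handled correctly. Roughly (paraphrased from include/linux/jiffies.h):

#define time_after(a, b)		\
	(typecheck(unsigned long, a) &&	\
	 typecheck(unsigned long, b) &&	\
	 ((long)((b) - (a)) < 0))

/* "time_is_before_jiffies(a)" reads as "a is before now", i.e. already in the past. */
#define time_is_before_jiffies(a) time_after(jiffies, a)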
......@@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(2048);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
......
......@@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
struct dma_async_tx_descriptor *
static struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
......@@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
}
}
struct dma_async_tx_descriptor *
static struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
......@@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags);
}
struct dma_async_tx_descriptor *
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
......@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(262144);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
......
/*
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
* Portions based on net/core/datagram.c and copyrighted by their authors.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
* This code allows the net stack to make use of a DMA engine for
* skb to iovec copies.
*/
#include <linux/dmaengine.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <net/tcp.h> /* for memcpy_toiovec */
#include <asm/io.h>
#include <asm/uaccess.h>
static int num_pages_spanned(struct iovec *iov)
{
return
((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
}
/*
* Pin down all the iovec pages needed for len bytes.
* Return a struct dma_pinned_list to keep track of pages pinned down.
*
* We are allocating a single chunk of memory, and then carving it up into
* 3 sections, the latter 2 whose size depends on the number of iovecs and the
* total number of pages, respectively.
*/
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
{
struct dma_pinned_list *local_list;
struct page **pages;
int i;
int ret;
int nr_iovecs = 0;
int iovec_len_used = 0;
int iovec_pages_used = 0;
/* don't pin down non-user-based iovecs */
if (segment_eq(get_fs(), KERNEL_DS))
return NULL;
/* determine how many iovecs/pages there are, up front */
do {
iovec_len_used += iov[nr_iovecs].iov_len;
iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
nr_iovecs++;
} while (iovec_len_used < len);
/* single kmalloc for pinned list, page_list[], and the page arrays */
local_list = kmalloc(sizeof(*local_list)
+ (nr_iovecs * sizeof (struct dma_page_list))
+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
if (!local_list)
goto out;
/* list of pages starts right after the page list array */
pages = (struct page **) &local_list->page_list[nr_iovecs];
local_list->nr_iovecs = 0;
for (i = 0; i < nr_iovecs; i++) {
struct dma_page_list *page_list = &local_list->page_list[i];
len -= iov[i].iov_len;
if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
goto unpin;
page_list->nr_pages = num_pages_spanned(&iov[i]);
page_list->base_address = iov[i].iov_base;
page_list->pages = pages;
pages += page_list->nr_pages;
/* pin pages down */
down_read(&current->mm->mmap_sem);
ret = get_user_pages(
current,
current->mm,
(unsigned long) iov[i].iov_base,
page_list->nr_pages,
1, /* write */
0, /* force */
page_list->pages,
NULL);
up_read(&current->mm->mmap_sem);
if (ret != page_list->nr_pages)
goto unpin;
local_list->nr_iovecs = i + 1;
}
return local_list;
unpin:
dma_unpin_iovec_pages(local_list);
out:
return NULL;
}
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
{
int i, j;
if (!pinned_list)
return;
for (i = 0; i < pinned_list->nr_iovecs; i++) {
struct dma_page_list *page_list = &pinned_list->page_list[i];
for (j = 0; j < page_list->nr_pages; j++) {
set_page_dirty_lock(page_list->pages[j]);
page_cache_release(page_list->pages[j]);
}
}
kfree(pinned_list);
}
/*
* We have already pinned down the pages we will be using in the iovecs.
* Each entry in iov array has corresponding entry in pinned_list->page_list.
* Using array indexing to keep iov[] and page_list[] in sync.
* Initial elements in iov array's iov->iov_len will be 0 if already copied into
* by another call.
* iov array length remaining guaranteed to be bigger than len.
*/
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
{
int iov_byte_offset;
int copy;
dma_cookie_t dma_cookie = 0;
int iovec_idx;
int page_idx;
if (!chan)
return memcpy_toiovec(iov, kdata, len);
iovec_idx = 0;
while (iovec_idx < pinned_list->nr_iovecs) {
struct dma_page_list *page_list;
/* skip already used-up iovecs */
while (!iov[iovec_idx].iov_len)
iovec_idx++;
page_list = &pinned_list->page_list[iovec_idx];
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
/* break up copies to not cross page boundary */
while (iov[iovec_idx].iov_len) {
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
copy = min_t(int, copy, iov[iovec_idx].iov_len);
dma_cookie = dma_async_memcpy_buf_to_pg(chan,
page_list->pages[page_idx],
iov_byte_offset,
kdata,
copy);
/* poll for a descriptor slot */
if (unlikely(dma_cookie < 0)) {
dma_async_issue_pending(chan);
continue;
}
len -= copy;
iov[iovec_idx].iov_len -= copy;
iov[iovec_idx].iov_base += copy;
if (!len)
return dma_cookie;
kdata += copy;
iov_byte_offset = 0;
page_idx++;
}
iovec_idx++;
}