/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

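/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 *
 * Reads the channel count and transfer capability from the hardware, then
 * allocates, initializes, and registers a struct ioat_dma_chan per channel.
 */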
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat_chan;

        device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

        for (i = 0; i < device->common.chancnt; i++) {
                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan) {
                        device->common.chancnt = i;
                        break;
                }

                ioat_chan->device = device;
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                /* This should be made common somewhere in dmaengine.c */
                ioat_chan->common.device = &device->common;
                list_add_tail(&ioat_chan->common.device_node,
                              &device->common.channels);
        }
        return device->common.chancnt;
}

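/**
 * ioat_set_src - set the source address of a descriptor group
 * @addr: DMA address of the source buffer
 * @tx: descriptor group to update
 * @index: source index for multi-source operations (a single-source
 *	memcpy ignores this)
 *
 * Each hardware descriptor in the chain copies at most xfercap bytes, so
 * consecutive descriptors take consecutive xfercap-sized slices of @addr.
 */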
static void ioat_set_src(dma_addr_t addr,
                         struct dma_async_tx_descriptor *tx,
                         int index)
{
        struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

        pci_unmap_addr_set(desc, src, addr);

        list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
                iter->hw->src_addr = addr;
                addr += ioat_chan->xfercap;
        }

}

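/**
 * ioat_set_dest - set the destination address of a descriptor group
 * @addr: DMA address of the destination buffer
 * @tx: descriptor group to update
 * @index: destination index (unused for memcpy's single destination)
 *
 * Mirrors ioat_set_src: each descriptor in the chain is pointed at its
 * xfercap-sized slice of the destination buffer.
 */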
static void ioat_set_dest(dma_addr_t addr,
                          struct dma_async_tx_descriptor *tx,
                          int index)
{
        struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

        pci_unmap_addr_set(desc, dst, addr);

        list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
                iter->hw->dst_addr = addr;
                addr += ioat_chan->xfercap;
        }
}

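/**
 * ioat_tx_submit - assign a cookie and hand a descriptor group to hardware
 * @tx: descriptor group to submit
 *
 * Assigns the channel's next cookie, links the group onto the end of the
 * in-flight chain, and issues the append command once four or more
 * descriptors are pending.
 */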
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
        int append = 0;
        dma_cookie_t cookie;
        struct ioat_desc_sw *group_start;

        group_start = list_entry(desc->async_tx.tx_list.next,
                                 struct ioat_desc_sw, node);
        spin_lock_bh(&ioat_chan->desc_lock);
        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
                                                group_start->async_tx.phys;
        list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

        ioat_chan->pending += desc->tx_cnt;
        if (ioat_chan->pending >= 4) {
                append = 1;
                ioat_chan->pending = 0;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        if (append)
                writeb(IOAT_CHANCMD_APPEND,
                       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

        return cookie;
}

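/**
 * ioat_dma_alloc_descriptor - allocate a paired software/hardware descriptor
 * @ioat_chan: the channel supplying the DMA pool
 * @flags: allocation flags
 *
 * The hardware descriptor comes from the device's DMA pool; the software
 * wrapper carries the dma_async_tx_descriptor with this driver's
 * set_src/set_dest/submit methods.
 */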
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
                                        struct ioat_dma_chan *ioat_chan,
                                        gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
        desc_sw->async_tx.tx_set_src = ioat_set_src;
        desc_sw->async_tx.tx_set_dest = ioat_set_dest;
        desc_sw->async_tx.tx_submit = ioat_tx_submit;
        INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
        desc_sw->hw = desc;
        desc_sw->async_tx.phys = phys;

        return desc_sw;
}

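/**
 * ioat_dma_alloc_chan_resources - first-use setup for a channel
 * @chan: channel to initialize
 *
 * Enables error interrupts, clears stale channel errors, preallocates the
 * initial descriptor pool, points the hardware at the completion writeback
 * area, and primes the channel with a null descriptor.
 */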
/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc = NULL;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
                return INITIAL_IOAT_DESC_COUNT;

        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "ioatdma: CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "ioatdma: Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        list_splice(&tmp_list, &ioat_chan->free_desc);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        ioat_dma_start_null_desc(ioat_chan);
        return i;
}

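/**
 * ioat_dma_free_chan_resources - release descriptors and the writeback area
 * @chan: channel to be cleaned up
 *
 * Flushes outstanding completions, resets the channel, and returns every
 * software/hardware descriptor pair and the completion area to their pools.
 */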
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        ioat_dma_memcpy_cleanup(ioat_chan);

        writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

        spin_lock_bh(&ioat_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
                in_use_descs++;
                list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->async_tx.phys);
                kfree(desc);
        }
        list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
                list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->async_tx.phys);
                kfree(desc);
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      ioat_chan->completion_virt,
                      ioat_chan->completion_addr);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(&ioat_chan->device->pdev->dev,
                        "ioatdma: Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
}

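/**
 * ioat_dma_prep_memcpy - build a descriptor chain covering @len bytes
 * @chan: channel to prepare the operation on
 * @len: total number of bytes to copy
 * @int_en: set when the client wants a completion interrupt (unused in
 *	this version)
 *
 * Splits the copy into xfercap-sized hardware descriptors. Source and
 * destination addresses are filled in later via tx_set_src/tx_set_dest,
 * and the chain is handed to hardware by tx_submit. A typical client
 * sequence (sketch):
 *
 *	tx = dev->device_prep_dma_memcpy(chan, len, int_flag);
 *	tx->tx_set_src(src_dma, tx, 0);
 *	tx->tx_set_dest(dst_dma, tx, 0);
 *	cookie = tx->tx_submit(tx);
 */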
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                size_t len,
                                                int int_en)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *first, *prev, *new;
        LIST_HEAD(new_chain);
        u32 copy;
        size_t orig_len;
        int desc_count = 0;

        if (!len)
                return NULL;

        orig_len = len;

        first = NULL;
        prev = NULL;

        spin_lock_bh(&ioat_chan->desc_lock);
        while (len) {
                if (!list_empty(&ioat_chan->free_desc)) {
                        new = to_ioat_desc(ioat_chan->free_desc.next);
                        list_del(&new->node);
                } else {
                        /* try to get another desc */
                        new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        /* will this ever happen? */
                        /* TODO add upper limit on these */
                        BUG_ON(!new);
                }

                copy = min((u32) len, ioat_chan->xfercap);

                new->hw->size = copy;
                new->hw->ctl = 0;
                new->async_tx.cookie = 0;
                new->async_tx.ack = 1;

                /* chain together the physical address list for the HW */
                if (!first)
                        first = new;
                else
                        prev->hw->next = (u64) new->async_tx.phys;

                prev = new;
                len -= copy;
                list_add_tail(&new->node, &new_chain);
                desc_count++;
        }

        list_splice(&new_chain, &new->async_tx.tx_list);

        new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        new->hw->next = 0;
        new->tx_cnt = desc_count;
        new->async_tx.ack = 0; /* client is in control of this ack */
        new->async_tx.cookie = -EBUSY;

        pci_unmap_len_set(new, len, orig_len);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                ioat_chan->pending = 0;
                writeb(IOAT_CHANCMD_APPEND,
                       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
        }
}

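/**
 * ioat_dma_memcpy_cleanup - reclaim finished descriptors
 * @ioat_chan: channel to clean up
 *
 * Reads the completion writeback area to find the last descriptor the
 * hardware finished, then walks the in-flight list, unmapping the buffers
 * of completed requests and recycling their descriptors.
 */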
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
|
|
|
unsigned long phys_complete;
|
|
|
|
struct ioat_desc_sw *desc, *_desc;
|
|
|
|
dma_cookie_t cookie = 0;
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
prefetch(ioat_chan->completion_virt);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
if (!spin_trylock(&ioat_chan->cleanup_lock))
|
2006-05-24 00:35:34 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* The completion writeback can happen at any time,
|
|
|
|
so reads by the driver need to be atomic operations
|
|
|
|
The descriptor physical addresses are limited to 32-bits
|
|
|
|
when the CPU can only do a 32-bit mov */
|
|
|
|
|
|
|
|
#if (BITS_PER_LONG == 64)
|
|
|
|
phys_complete =
|
2007-10-16 08:27:39 +00:00
|
|
|
ioat_chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
|
2006-05-24 00:35:34 +00:00
|
|
|
#else
|
2007-10-16 08:27:39 +00:00
|
|
|
phys_complete = ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
|
2006-05-24 00:35:34 +00:00
|
|
|
#endif
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
|
|
|
|
IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
|
|
|
|
dev_err(&ioat_chan->device->pdev->dev,
|
|
|
|
"ioatdma: Channel halted, chanerr = %x\n",
|
|
|
|
readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
/* TODO do something to salvage the situation */
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
if (phys_complete == ioat_chan->last_completion) {
|
|
|
|
spin_unlock(&ioat_chan->cleanup_lock);
|
2006-05-24 00:35:34 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
spin_lock_bh(&ioat_chan->desc_lock);
|
|
|
|
list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Incoming DMA requests may use multiple descriptors, due to
|
|
|
|
* exceeding xfercap, perhaps. If so, only the last one will
|
|
|
|
* have a cookie, and require unmapping.
|
|
|
|
*/
|
2007-01-02 18:10:43 +00:00
|
|
|
if (desc->async_tx.cookie) {
|
|
|
|
cookie = desc->async_tx.cookie;
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
/*
 * Yes, we are unmapping both _page and _single mapped
 * regions with unmap_page; on the platforms this driver
 * runs on, the two unmap paths are equivalent.
 */
|
|
|
|
pci_unmap_page(ioat_chan->device->pdev,
|
2006-05-24 00:35:34 +00:00
|
|
|
pci_unmap_addr(desc, dst),
|
2007-08-15 00:36:31 +00:00
|
|
|
pci_unmap_len(desc, len),
|
2006-05-24 00:35:34 +00:00
|
|
|
PCI_DMA_FROMDEVICE);
|
2007-10-16 08:27:39 +00:00
|
|
|
pci_unmap_page(ioat_chan->device->pdev,
|
2006-05-24 00:35:34 +00:00
|
|
|
pci_unmap_addr(desc, src),
|
2007-08-15 00:36:31 +00:00
|
|
|
pci_unmap_len(desc, len),
|
2006-05-24 00:35:34 +00:00
|
|
|
PCI_DMA_TODEVICE);
|
|
|
|
}
|
|
|
|
|
2007-01-02 18:10:43 +00:00
|
|
|
if (desc->async_tx.phys != phys_complete) {
|
2007-10-16 08:27:39 +00:00
|
|
|
/*
 * a completed entry, but not the last, so clean up
 * if the client is done with the descriptor
 */
|
|
|
|
if (desc->async_tx.ack) {
|
|
|
|
list_del(&desc->node);
|
2007-10-16 08:27:39 +00:00
|
|
|
list_add_tail(&desc->node,
|
|
|
|
&ioat_chan->free_desc);
|
2007-01-02 18:10:43 +00:00
|
|
|
} else
|
|
|
|
desc->async_tx.cookie = 0;
|
2006-05-24 00:35:34 +00:00
|
|
|
} else {
|
2007-10-16 08:27:39 +00:00
|
|
|
/*
 * The last used descriptor. Do not remove it, so we
 * can append from it, but do not look at it again on
 * the next pass either.
 */
|
2007-01-02 18:10:43 +00:00
|
|
|
desc->async_tx.cookie = 0;
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
/* TODO check status bits? */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
spin_unlock_bh(&ioat_chan->desc_lock);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
ioat_chan->last_completion = phys_complete;
|
2006-05-24 00:35:34 +00:00
|
|
|
if (cookie != 0)
|
2007-10-16 08:27:39 +00:00
|
|
|
ioat_chan->completed_cookie = cookie;
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
spin_unlock(&ioat_chan->cleanup_lock);
|
2006-05-24 00:35:34 +00:00
|
|
|
}
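/*
 * Illustrative sketch, not part of the driver: the completion
 * address read at the top of ioat_dma_memcpy_cleanup() could be
 * factored into a helper like the one below. It uses only the
 * fields and masks that appear in the function above; the helper
 * name itself is hypothetical.
 */
static inline unsigned long
ioat_get_phys_complete(struct ioat_dma_chan *ioat_chan)
{
#if (BITS_PER_LONG == 64)
	/* a 64-bit CPU can read the whole writeback area atomically */
	return ioat_chan->completion_virt->full &
	       IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	/* a 32-bit CPU can only read the low 32 bits atomically */
	return ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif
}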
|
|
|
|
|
2007-01-02 18:10:43 +00:00
|
|
|
static void ioat_dma_dependency_added(struct dma_chan *chan)
|
|
|
|
{
|
|
|
|
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
|
|
|
|
spin_lock_bh(&ioat_chan->desc_lock);
|
|
|
|
if (ioat_chan->pending == 0) {
|
|
|
|
spin_unlock_bh(&ioat_chan->desc_lock);
|
|
|
|
ioat_dma_memcpy_cleanup(ioat_chan);
|
|
|
|
} else
|
|
|
|
spin_unlock_bh(&ioat_chan->desc_lock);
|
|
|
|
}
|
|
|
|
|
2006-05-24 00:35:34 +00:00
|
|
|
/**
|
|
|
|
* ioat_dma_is_complete - poll the status of an IOAT DMA transaction
|
|
|
|
* @chan: IOAT DMA channel handle
|
|
|
|
* @cookie: DMA transaction identifier
|
2006-07-04 02:45:31 +00:00
|
|
|
* @done: if not %NULL, updated with last completed transaction
|
|
|
|
* @used: if not %NULL, updated with last used transaction
|
2006-05-24 00:35:34 +00:00
|
|
|
*/
|
|
|
|
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
|
2007-10-16 08:27:39 +00:00
|
|
|
dma_cookie_t cookie,
|
|
|
|
dma_cookie_t *done,
|
|
|
|
dma_cookie_t *used)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
|
|
|
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
|
|
|
|
dma_cookie_t last_used;
|
|
|
|
dma_cookie_t last_complete;
|
|
|
|
enum dma_status ret;
|
|
|
|
|
|
|
|
last_used = chan->cookie;
|
|
|
|
last_complete = ioat_chan->completed_cookie;
|
|
|
|
|
|
|
|
if (done)
|
2007-10-16 08:27:39 +00:00
|
|
|
*done = last_complete;
|
2006-05-24 00:35:34 +00:00
|
|
|
if (used)
|
|
|
|
*used = last_used;
|
|
|
|
|
|
|
|
ret = dma_async_is_complete(cookie, last_complete, last_used);
|
|
|
|
if (ret == DMA_SUCCESS)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ioat_dma_memcpy_cleanup(ioat_chan);
|
|
|
|
|
|
|
|
last_used = chan->cookie;
|
|
|
|
last_complete = ioat_chan->completed_cookie;
|
|
|
|
|
|
|
|
if (done)
|
2007-10-16 08:27:39 +00:00
|
|
|
*done = last_complete;
|
2006-05-24 00:35:34 +00:00
|
|
|
if (used)
|
|
|
|
*used = last_used;
|
|
|
|
|
|
|
|
return dma_async_is_complete(cookie, last_complete, last_used);
|
|
|
|
}
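/*
 * Usage sketch, hypothetical caller code rather than part of this
 * file: a client that submitted a copy and holds its cookie can
 * poll for completion through the generic dmaengine entry point,
 * which lands in ioat_dma_is_complete() above. The @done and @used
 * out-parameters receive the last completed and last used cookies,
 * as documented above; example_wait_for_copy is a made-up name.
 */
static void example_wait_for_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
	dma_cookie_t done, used;

	while (dma_async_is_tx_complete(chan, cookie, &done, &used)
			!= DMA_SUCCESS)
		cpu_relax();
}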
|
|
|
|
|
|
|
|
/* PCI API */
|
|
|
|
|
2006-10-05 13:55:46 +00:00
|
|
|
static irqreturn_t ioat_do_interrupt(int irq, void *data)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
2007-10-16 08:27:39 +00:00
|
|
|
struct ioatdma_device *instance = data;
|
2006-05-24 00:35:34 +00:00
|
|
|
unsigned long attnstatus;
|
|
|
|
u8 intrctrl;
|
|
|
|
|
2007-03-08 17:57:35 +00:00
|
|
|
intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
|
|
|
|
return IRQ_NONE;
|
|
|
|
|
|
|
|
if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
|
2007-03-08 17:57:35 +00:00
|
|
|
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
|
2006-05-24 00:35:34 +00:00
|
|
|
return IRQ_NONE;
|
|
|
|
}
|
|
|
|
|
2007-03-08 17:57:35 +00:00
|
|
|
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
printk(KERN_DEBUG "ioatdma: interrupt! status %lx\n", attnstatus);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-03-08 17:57:35 +00:00
|
|
|
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
|
2006-05-24 00:35:34 +00:00
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
|
|
|
struct ioat_desc_sw *desc;
|
|
|
|
|
|
|
|
spin_lock_bh(&ioat_chan->desc_lock);
|
|
|
|
|
|
|
|
if (!list_empty(&ioat_chan->free_desc)) {
|
|
|
|
desc = to_ioat_desc(ioat_chan->free_desc.next);
|
|
|
|
list_del(&desc->node);
|
|
|
|
} else {
|
|
|
|
/* try to get another desc */
|
|
|
|
spin_unlock_bh(&ioat_chan->desc_lock);
|
|
|
|
desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
|
|
|
|
spin_lock_bh(&ioat_chan->desc_lock);
|
|
|
|
/* allocation failure here is unexpected */
|
|
|
|
BUG_ON(!desc);
|
|
|
|
}
|
|
|
|
|
|
|
|
desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
|
|
|
|
desc->hw->next = 0;
|
2007-01-02 18:10:43 +00:00
|
|
|
desc->async_tx.ack = 1;
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
list_add_tail(&desc->node, &ioat_chan->used_desc);
|
|
|
|
spin_unlock_bh(&ioat_chan->desc_lock);
|
|
|
|
|
2007-01-02 18:10:43 +00:00
|
|
|
writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
|
2007-03-08 17:57:35 +00:00
|
|
|
ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
|
2007-01-02 18:10:43 +00:00
|
|
|
writel(((u64) desc->async_tx.phys) >> 32,
|
2007-03-08 17:57:35 +00:00
|
|
|
ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
|
|
|
|
|
2007-03-08 17:57:35 +00:00
|
|
|
writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
|
2006-05-24 00:35:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform an IOAT transaction to verify the HW works.
|
|
|
|
*/
|
|
|
|
#define IOAT_TEST_SIZE 2000
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
static int ioat_self_test(struct ioatdma_device *device)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
u8 *src;
|
|
|
|
u8 *dest;
|
|
|
|
struct dma_chan *dma_chan;
|
2007-01-02 18:10:43 +00:00
|
|
|
struct dma_async_tx_descriptor *tx;
|
|
|
|
dma_addr_t addr;
|
2006-05-24 00:35:34 +00:00
|
|
|
dma_cookie_t cookie;
|
|
|
|
int err = 0;
|
|
|
|
|
2006-12-07 04:33:17 +00:00
|
|
|
src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
|
2006-05-24 00:35:34 +00:00
|
|
|
if (!src)
|
|
|
|
return -ENOMEM;
|
2006-12-07 04:33:17 +00:00
|
|
|
dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
|
2006-05-24 00:35:34 +00:00
|
|
|
if (!dest) {
|
|
|
|
kfree(src);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill in src buffer */
|
|
|
|
for (i = 0; i < IOAT_TEST_SIZE; i++)
|
|
|
|
src[i] = (u8)i;
|
|
|
|
|
|
|
|
/* Start copy, using first DMA channel */
|
|
|
|
dma_chan = container_of(device->common.channels.next,
|
2007-10-16 08:27:39 +00:00
|
|
|
struct dma_chan,
|
|
|
|
device_node);
|
2006-05-24 00:35:34 +00:00
|
|
|
if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
|
2007-10-16 08:27:39 +00:00
|
|
|
dev_err(&device->pdev->dev,
|
|
|
|
"selftest cannot allocate chan resource\n");
|
2006-05-24 00:35:34 +00:00
|
|
|
err = -ENODEV;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2007-01-02 18:10:43 +00:00
|
|
|
tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
if (!tx) {
	dev_err(&device->pdev->dev,
		"selftest cannot prepare memcpy descriptor\n");
	err = -ENODEV;
	goto free_resources;
}
|
|
|
|
async_tx_ack(tx);
|
|
|
|
addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
|
|
|
|
DMA_TO_DEVICE);
|
|
|
|
ioat_set_src(addr, tx, 0);
|
|
|
|
addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
|
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
ioat_set_dest(addr, tx, 0);
|
|
|
|
cookie = ioat_tx_submit(tx);
|
2006-05-24 00:35:34 +00:00
|
|
|
ioat_dma_memcpy_issue_pending(dma_chan);
|
|
|
|
msleep(1);
|
|
|
|
|
|
|
|
if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
|
2007-10-16 08:27:39 +00:00
|
|
|
dev_err(&device->pdev->dev,
|
|
|
|
"ioatdma: Self-test copy timed out, disabling\n");
|
2006-05-24 00:35:34 +00:00
|
|
|
err = -ENODEV;
|
|
|
|
goto free_resources;
|
|
|
|
}
|
|
|
|
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
|
2007-10-16 08:27:39 +00:00
|
|
|
dev_err(&device->pdev->dev,
|
|
|
|
"ioatdma: Self-test copy failed compare, disabling\n");
|
2006-05-24 00:35:34 +00:00
|
|
|
err = -ENODEV;
|
|
|
|
goto free_resources;
|
|
|
|
}
|
|
|
|
|
|
|
|
free_resources:
|
|
|
|
ioat_dma_free_chan_resources(dma_chan);
|
|
|
|
out:
|
|
|
|
kfree(src);
|
|
|
|
kfree(dest);
|
|
|
|
return err;
|
|
|
|
}
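/*
 * For comparison, a hypothetical client-side version of the copy
 * the self-test performs, going through the generic
 * dma_async_tx_descriptor methods instead of the driver-private
 * ioat_* entry points. example_submit_copy is an illustrative
 * name; dst and src must already be DMA-mapped by the caller.
 */
static dma_cookie_t example_submit_copy(struct dma_chan *chan,
					dma_addr_t dst, dma_addr_t src,
					size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* int_en == 0: no interrupt on completion, as in the self-test */
	tx = chan->device->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;
	tx->tx_set_src(src, tx, 0);
	tx->tx_set_dest(dst, tx, 0);
	tx->callback = NULL;	/* no completion callback */
	return tx->tx_submit(tx);
}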
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
|
|
|
|
void __iomem *iobase)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
|
|
|
int err;
|
2007-10-16 08:27:39 +00:00
|
|
|
struct ioatdma_device *device;
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
device = kzalloc(sizeof(*device), GFP_KERNEL);
|
|
|
|
if (!device) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_kzalloc;
|
|
|
|
}
|
2007-10-16 08:27:39 +00:00
|
|
|
device->pdev = pdev;
|
|
|
|
device->reg_base = iobase;
|
|
|
|
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
/* DMA coherent memory pool for DMA descriptor allocations */
|
|
|
|
device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
|
2007-10-16 08:27:39 +00:00
|
|
|
sizeof(struct ioat_dma_descriptor),
|
|
|
|
64, 0);
|
2006-05-24 00:35:34 +00:00
|
|
|
if (!device->dma_pool) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_dma_pool;
|
|
|
|
}
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
device->completion_pool = pci_pool_create("completion_pool", pdev,
|
|
|
|
sizeof(u64), SMP_CACHE_BYTES,
|
|
|
|
SMP_CACHE_BYTES);
|
2006-05-24 00:35:34 +00:00
|
|
|
if (!device->completion_pool) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto err_completion_pool;
|
|
|
|
}
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&device->common.channels);
|
2007-10-16 08:27:39 +00:00
|
|
|
ioat_dma_enumerate_channels(device);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
2007-01-02 18:10:43 +00:00
|
|
|
dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
|
2007-10-16 08:27:39 +00:00
|
|
|
device->common.device_alloc_chan_resources =
|
|
|
|
ioat_dma_alloc_chan_resources;
|
|
|
|
device->common.device_free_chan_resources =
|
|
|
|
ioat_dma_free_chan_resources;
|
2007-01-02 18:10:43 +00:00
|
|
|
device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
|
|
|
|
device->common.device_is_tx_complete = ioat_dma_is_complete;
|
|
|
|
device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
|
|
|
|
device->common.device_dependency_added = ioat_dma_dependency_added;
|
|
|
|
device->common.dev = &pdev->dev;
|
2007-10-16 08:27:39 +00:00
|
|
|
printk(KERN_INFO "ioatdma: Intel(R) I/OAT DMA Engine found,"
|
|
|
|
" %d channels, device version 0x%02x\n",
|
|
|
|
device->common.chancnt, device->version);
|
|
|
|
|
|
|
|
pci_set_drvdata(pdev, device);
|
|
|
|
err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
|
|
|
|
device);
|
|
|
|
if (err)
|
|
|
|
goto err_irq;
|
|
|
|
|
|
|
|
writeb(IOAT_INTRCTRL_MASTER_INT_EN,
|
|
|
|
device->reg_base + IOAT_INTRCTRL_OFFSET);
|
|
|
|
pci_set_master(pdev);
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
err = ioat_self_test(device);
|
|
|
|
if (err)
|
|
|
|
goto err_self_test;
|
|
|
|
|
|
|
|
dma_async_device_register(&device->common);
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
return device;
|
2006-05-24 00:35:34 +00:00
|
|
|
|
|
|
|
err_self_test:
|
2007-10-16 08:27:39 +00:00
|
|
|
free_irq(device->pdev->irq, device);
|
2006-05-24 00:35:34 +00:00
|
|
|
err_irq:
|
|
|
|
pci_pool_destroy(device->completion_pool);
|
|
|
|
err_completion_pool:
|
|
|
|
pci_pool_destroy(device->dma_pool);
|
|
|
|
err_dma_pool:
|
|
|
|
kfree(device);
|
|
|
|
err_kzalloc:
|
2007-10-16 08:27:39 +00:00
|
|
|
iounmap(iobase);
|
|
|
|
printk(KERN_ERR
|
|
|
|
"ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
|
|
|
|
return NULL;
|
2007-03-08 17:57:36 +00:00
|
|
|
}
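/*
 * Sketch of the PCI glue (hypothetical; the real caller lives in a
 * separate file): a pci_driver probe routine would map BAR 0 and
 * hand the mapping to ioat_dma_probe() above. example_pci_probe is
 * an illustrative name.
 */
static int __devinit example_pci_probe(struct pci_dev *pdev,
				       const struct pci_device_id *id)
{
	void __iomem *iobase;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, "ioatdma");
	if (err)
		goto err_regions;
	iobase = pci_iomap(pdev, 0, 0);	/* map MMIO BAR 0 */
	if (!iobase) {
		err = -ENOMEM;
		goto err_iomap;
	}
	if (!ioat_dma_probe(pdev, iobase)) {
		err = -ENODEV;
		goto err_probe;
	}
	return 0;

err_probe:
	/* ioat_dma_probe() already unmapped iobase on failure */
err_iomap:
	pci_release_regions(pdev);
err_regions:
	pci_disable_device(pdev);
	return err;
}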
|
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
void ioat_dma_remove(struct ioatdma_device *device)
|
2006-05-24 00:35:34 +00:00
|
|
|
{
|
|
|
|
struct dma_chan *chan, *_chan;
|
|
|
|
struct ioat_dma_chan *ioat_chan;
|
|
|
|
|
|
|
|
dma_async_device_unregister(&device->common);
|
|
|
|
|
|
|
|
free_irq(device->pdev->irq, device);
|
2007-10-16 08:27:39 +00:00
|
|
|
|
2006-05-24 00:35:34 +00:00
|
|
|
pci_pool_destroy(device->dma_pool);
|
|
|
|
pci_pool_destroy(device->completion_pool);
|
2007-10-16 08:27:39 +00:00
|
|
|
|
2007-10-16 08:27:39 +00:00
|
|
|
list_for_each_entry_safe(chan, _chan,
|
|
|
|
&device->common.channels, device_node) {
|
2006-05-24 00:35:34 +00:00
|
|
|
ioat_chan = to_ioat_chan(chan);
|
|
|
|
list_del(&chan->device_node);
|
|
|
|
kfree(ioat_chan);
|
|
|
|
}
|
|
|
|
kfree(device);
|
|
|
|
}
|
|
|
|
|