Commit 45711f1a authored by Jens Axboe

[SG] Update drivers to use sg helpers


Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 78c2f0b8
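Every hunk below applies the same mechanical conversion: drivers stop dereferencing sg->page directly and go through the scatterlist helper accessors, which is what lets the scatterlist core store extra state (chain and end markers) alongside the page pointer. A minimal sketch of the before/after shape, assuming a hypothetical driver helper copy_from_seg(); the helper calls match <linux/scatterlist.h> as of this commit (where sg_set_page() still takes only the entry and the page), everything else is illustrative:

```c
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Hypothetical example, not taken from the patch: copy one segment's
 * data out of a scatterlist entry (assumes the segment lies within
 * the mapped page).  The page pointer is read through sg_page()
 * instead of the old direct sg->page access. */
static void copy_from_seg(struct scatterlist *sg, void *dst)
{
	/* old style: void *addr = kmap_atomic(sg->page, KM_IRQ0); */
	void *addr = kmap_atomic(sg_page(sg), KM_IRQ0);

	memcpy(dst, addr + sg->offset, sg->length);
	kunmap_atomic(addr, KM_IRQ0);
}
```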
@@ -1115,7 +1115,7 @@ static void do_ubd_request(struct request_queue *q)
}
prepare_request(req, io_req,
(unsigned long long) req->sector << 9,
sg->offset, sg->length, sg->page);
sg->offset, sg->length, sg_page(sg));
last_sectors = sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
......
@@ -4296,7 +4296,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
if (pad_buf) {
struct scatterlist *psg = &qc->pad_sgent;
void *addr = kmap_atomic(psg->page, KM_IRQ0);
void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
memcpy(addr + psg->offset, pad_buf, qc->pad_len);
kunmap_atomic(addr, KM_IRQ0);
}
@@ -4686,11 +4686,11 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
* data in this function or read data in ata_sg_clean.
*/
offset = lsg->offset + lsg->length - qc->pad_len;
psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
psg->offset = offset_in_page(offset);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
void *addr = kmap_atomic(psg->page, KM_IRQ0);
void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
memcpy(pad_buf, addr + psg->offset, qc->pad_len);
kunmap_atomic(addr, KM_IRQ0);
}
@@ -4836,7 +4836,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
page = qc->cursg->page;
page = sg_page(qc->cursg);
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
@@ -4988,7 +4988,7 @@ next_sg:
sg = qc->cursg;
page = sg->page;
page = sg_page(sg);
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
......
@@ -1544,7 +1544,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
struct scatterlist *sg = scsi_sglist(cmd);
if (sg) {
buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
buflen = sg->length;
} else {
buf = NULL;
......
@@ -345,6 +345,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
Command->V1.ScatterGatherList =
(DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
} else {
Command->cmd_sglist = Command->V2.ScatterList;
Command->V2.ScatterGatherList =
@@ -353,6 +354,7 @@ static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
Command->V2.RequestSense =
(DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
Command->V2.RequestSenseDMA = RequestSenseDMA;
sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
}
}
return true;
......
@@ -2610,7 +2610,7 @@ static void do_cciss_request(struct request_queue *q)
(int)creq->nr_sectors);
#endif /* CCISS_DEBUG */
memset(tmp_sg, 0, sizeof(tmp_sg));
sg_init_table(tmp_sg, MAXSGENTRIES);
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* get the DMA records for the setup */
@@ -2621,7 +2621,7 @@ static void do_cciss_request(struct request_queue *q)
for (i = 0; i < seg; i++) {
c->SG[i].Len = tmp_sg[i].length;
temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
c->SG[i].Addr.lower = temp64.val32.lower;
......
@@ -918,6 +918,7 @@ queue_next:
DBGPX(
printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
);
sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* Now do all the DMA Mappings */
@@ -929,7 +930,7 @@ DBGPX(
{
c->req.sg[i].size = tmp_sg[i].length;
c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
tmp_sg[i].page,
sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
......
@@ -26,6 +26,7 @@
#include <linux/crypto.h>
#include <linux/blkdev.h>
#include <linux/loop.h>
#include <linux/scatterlist.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -119,14 +120,17 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
.tfm = tfm,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
};
struct scatterlist sg_out = { NULL, };
struct scatterlist sg_in = { NULL, };
struct scatterlist sg_out;
struct scatterlist sg_in;
encdec_cbc_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
int err;
sg_init_table(&sg_out, 1);
sg_init_table(&sg_in, 1);
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
@@ -146,11 +150,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
u32 iv[4] = { 0, };
iv[0] = cpu_to_le32(IV & 0xffffffff);
sg_in.page = in_page;
sg_set_page(&sg_in, in_page);
sg_in.offset = in_offs;
sg_in.length = sz;
sg_out.page = out_page;
sg_set_page(&sg_out, out_page);
sg_out.offset = out_offs;
sg_out.length = sz;
......
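The cryptoloop hunks above show the other recurring piece of the conversion: scatterlist tables that a driver owns, whether declared on the stack or embedded in a command structure, are now set up with sg_init_table() before use instead of relying on zero initialization, and the page is stored with sg_set_page(). A hedged sketch of the resulting pattern for a one-entry table; the function name and its arguments are illustrative, only the helper calls come from the patch's API:

```c
#include <linux/scatterlist.h>

/* Illustrative only: describe a single buffer in a one-entry
 * scatterlist the way the converted drivers now do it. */
static void describe_buffer(struct scatterlist *sg, struct page *page,
			    unsigned int offset, unsigned int len)
{
	sg_init_table(sg, 1);	/* initialize the table before first use */
	sg_set_page(sg, page);	/* was: sg->page = page; */
	sg->offset = offset;
	sg->length = len;
}
```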
@@ -388,6 +388,7 @@ static int __send_request(struct request *req)
op = VD_OP_BWRITE;
}
sg_init_table(sg, port->ring_cookies);
nsg = blk_rq_map_sg(req->q, req, sg);
len = 0;
......
@@ -522,6 +522,7 @@ static struct carm_request *carm_get_request(struct carm_host *host)
host->n_msgs++;
assert(host->n_msgs <= CARM_MAX_REQ);
sg_init_table(crq->sg, CARM_MAX_REQ_SG);
return crq;
}
......
@@ -25,6 +25,7 @@
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
@@ -656,6 +657,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
if ((cmd = ub_get_cmd(lun)) == NULL)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
blkdev_dequeue_request(rq);
@@ -1309,9 +1311,8 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
page_address(sg->page) + sg->offset, sg->length,
ub_urb_complete, sc);
usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
sg->length, ub_urb_complete, sc);
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
@@ -1427,7 +1428,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
scmd->state = UB_CMDST_INIT;
scmd->nsg = 1;
sg = &scmd->sgv[0];
sg->page = virt_to_page(sc->top_sense);
sg_set_page(sg, virt_to_page(sc->top_sense));
sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
sg->length = UB_SENSE_SIZE;
scmd->len = UB_SENSE_SIZE;
@@ -1863,7 +1864,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
cmd->state = UB_CMDST_INIT;
cmd->nsg = 1;
sg = &cmd->sgv[0];
sg->page = virt_to_page(p);
sg_set_page(sg, virt_to_page(p));
sg->offset = (unsigned long)p & (PAGE_SIZE-1);
sg->length = 8;
cmd->len = 8;
......
@@ -41,6 +41,7 @@
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
@@ -270,6 +271,7 @@ static int send_request(struct request *req)
d = req->rq_disk->private_data;
/* Now build the scatter-gather list */
sg_init_table(sg, VIOMAXBLOCKDMA);
nsg = blk_rq_map_sg(req->q, req, sg);
nsg = dma_map_sg(d->dev, sg, nsg, direction);
......
@@ -935,11 +935,11 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
* than two possibly non-adjacent physical 4kB pages.
*/
/* group sequential buffers into one large buffer */
addr = page_to_phys(sg->page) + sg->offset;
addr = sg_phys(sg);
size = sg_dma_len(sg);
while (--i) {
sg = sg_next(sg);
if ((addr + size) != page_to_phys(sg->page) + sg->offset)
if ((addr + size) != sg_phys(sg))
break;
size += sg_dma_len(sg);
}
......
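The USB (ub) and IDE hunks replace the open-coded page_address(sg->page) + sg->offset and page_to_phys(sg->page) + sg->offset expressions with sg_virt() and sg_phys(). A rough sketch of the equivalence those conversions rely on; the function below is purely illustrative, and the comments paraphrase rather than quote the helper definitions:

```c
#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Illustrative only: both helpers fold the page + offset arithmetic
 * into one call, going through sg_page() to fetch the page first. */
static void dump_seg_addresses(struct scatterlist *sg)
{
	void *virt = sg_virt(sg);   /* ~ page_address(sg_page(sg)) + sg->offset */
	u64 phys = sg_phys(sg);     /* ~ page_to_phys(sg_page(sg)) + sg->offset */

	pr_debug("seg: virt %p phys %llx len %u\n",
		 virt, (unsigned long long)phys, sg->length);
}
```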
@@ -1317,12 +1317,14 @@ static int hwif_init(ide_hwif_t *hwif)
if (!hwif->sg_max_nents)
hwif->sg_max_nents = PRD_ENTRIES;
hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
GFP_KERNEL);
if (!hwif->sg_table) {
printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
goto out;
}
sg_init_table(hwif->sg_table, hwif->sg_max_nents);
if (init_irq(hwif) == 0)
goto done;
......
@@ -261,7 +261,7 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
hwif->cursg = sg;
}
page = cursg->page;
page = sg_page(cursg);
offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
/* get the current page and offset */
......
@@ -276,8 +276,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
if (iswrite) {
if(!put_source_flags(ahwif->tx_chan,
(void*)(page_address(sg->page)
+ sg->offset),
(void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__FUNCTION__, __LINE__);
@@ -285,8 +284,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
} else
{
if(!put_dest_flags(ahwif->rx_chan,
(void*)(page_address(sg->page)
+ sg->offset),
(void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__FUNCTION__, __LINE__);
......
@@ -111,7 +111,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
unsigned long va =
(unsigned long)dma->kvirt + (i << PAGE_SHIFT);
dma->sglist[i].page = vmalloc_to_page((void *)va);
sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
dma->sglist[i].length = PAGE_SIZE;
}
......
@@ -1466,7 +1466,7 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
cmd->dma_size = sgpnt[0].length;
cmd->dma_type = CMD_DMA_PAGE;
cmd->cmd_dma = dma_map_page(hi->host->device.parent,
sgpnt[0].page, sgpnt[0].offset,
sg_page(&sgpnt[0]), sgpnt[0].offset,
cmd->dma_size, cmd->dma_dir);
orb->data_descriptor_lo = cmd->cmd_dma;
......
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
ib_dma_unmap_sg(dev, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
struct page *page = sg_page(&chunk->page_list[i]);
if (umem->writable && dirty)
set_page_dirty_lock(chunk->page_list[i].page);
put_page(chunk->page_list[i].page);
set_page_dirty_lock(page);
put_page(page);
}
kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
}
chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
sg_init_table(chunk->page_list, chunk->nents);
for (i = 0; i < chunk->nents; ++i) {
if (vma_list &&
!is_vm_hugetlb_page(vma_list[i + off]))
umem->hugetlb = 0;
chunk->page_list[i].page = page_list[i + off];
sg_set_page(&chunk->page_list[i], page_list[i + off]);
chunk->page_list[i].offset = 0;
chunk->page_list[i].length = PAGE_SIZE;
}
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
put_page(chunk->page_list[i].page);
put_page(sg_page(&chunk->page_list[i]));
kfree(chunk);
ret = -ENOMEM;
......
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
BUG_ON(!valid_dma_direction(direction));
for_each_sg(sgl, sg, nents, i) {
addr = (u64) page_address(sg->page);
addr = (u64) page_address(sg_page(sg));
/* TODO: handle highmem pages */
if (!addr) {
ret = 0;
@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
u64 addr = (u64) page_address(sg->page);
u64 addr = (u64) page_address(sg_page(sg));
if (addr)
addr += sg->offset;
......
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
for (i = 0; i < chunk->nents; i++) {
void *vaddr;
vaddr = page_address(chunk->page_list[i].page);
vaddr = page_address(sg_page(&chunk->page_list[i]));
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail;
......