mirror of https://gitee.com/openkylin/qemu.git
dma-helpers: explicitly pass alignment into DMA helpers
The hard-coded default alignment is BDRV_SECTOR_SIZE; however, this is not necessarily the case for all platforms. Use this as the default alignment for all current callers.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Eric Blake <eblake@redhat.com>
Acked-by: John Snow <jsnow@redhat.com>
Message-id: 1476445266-27503-2-git-send-email-mark.cave-ayland@ilande.co.uk
Signed-off-by: John Snow <jsnow@redhat.com>
This commit is contained in:

parent 835f3d24b4
commit 99868af3d0
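For orientation before the diff itself: the change threads a new uint32_t align parameter through dma_blk_io(), dma_blk_read() and dma_blk_write(), and every existing caller passes BDRV_SECTOR_SIZE so behaviour is unchanged. The snippet below is a minimal, self-contained sketch of the new calling convention only: the QEMU types are stubbed out, dma_blk_read() is given a dummy body, and BDRV_SECTOR_SIZE is redefined locally as 512 purely so the example compiles on its own. None of it should be read as the real implementation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the QEMU types and constants, only so this sketch builds
 * outside the tree; the real declarations live in include/sysemu/dma.h. */
#define BDRV_SECTOR_SIZE 512
typedef struct BlockBackend BlockBackend;
typedef struct QEMUSGList QEMUSGList;
typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

/* Dummy stand-in whose signature mirrors the new dma_blk_read() prototype;
 * it only reports what it was given and does no I/O. */
static BlockAIOCB *dma_blk_read(BlockBackend *blk,
                                QEMUSGList *sg, uint64_t offset, uint32_t align,
                                BlockCompletionFunc *cb, void *opaque)
{
    (void)blk; (void)sg; (void)cb; (void)opaque;
    printf("dma_blk_read: offset=%" PRIu64 " align=%" PRIu32 "\n",
           offset, align);
    return NULL;
}

static void my_rw_cb(void *opaque, int ret)
{
    (void)opaque; (void)ret;   /* completion callback, unused here */
}

int main(void)
{
    /* Before this commit: dma_blk_read(blk, sg, offset, cb, opaque);
     * after it, an alignment argument is inserted, and every existing
     * caller passes BDRV_SECTOR_SIZE so behaviour is unchanged. */
    dma_blk_read(NULL, NULL, 0, BDRV_SECTOR_SIZE, my_rw_cb, NULL);
    return 0;
}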
dma-helpers.c

@@ -73,6 +73,7 @@ typedef struct {
     AioContext *ctx;
     BlockAIOCB *acb;
     QEMUSGList *sg;
+    uint32_t align;
     uint64_t offset;
     DMADirection dir;
     int sg_cur_index;
@@ -160,8 +161,9 @@ static void dma_blk_cb(void *opaque, int ret)
         return;
     }
 
-    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
-        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
+    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
+        qemu_iovec_discard_back(&dbs->iov,
+                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
     }
 
     dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
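The functional heart of the change is the hunk above: the alignment check and tail-trim in dma_blk_cb() are now driven by the caller-supplied dbs->align rather than the hard-coded BDRV_SECTOR_SIZE. One useful property of the division-based QEMU_IS_ALIGNED()/QEMU_ALIGN_DOWN() forms is that they work for any alignment, not only powers of two, unlike the old mask expression. The small self-contained program below re-derives those macros locally (the real definitions live in include/qemu/osdep.h and include/block/block.h) purely to show what each expression evaluates to for the default 512-byte alignment; it is an illustration of the arithmetic, not code from the commit.

#include <stdint.h>
#include <stdio.h>

/* Local re-derivations of the helpers used in the hunk above, so this
 * sketch builds on its own. */
#define BDRV_SECTOR_SIZE        512ULL
#define BDRV_SECTOR_MASK        (~(BDRV_SECTOR_SIZE - 1))
#define QEMU_ALIGN_DOWN(n, m)   ((n) / (m) * (m))
#define QEMU_IS_ALIGNED(n, m)   (((n) % (m)) == 0)

int main(void)
{
    uint64_t sizes[] = { 512, 513, 1000, 4096, 4100 };

    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        uint64_t size = sizes[i];

        /* Old expression: bytes beyond the last full 512-byte sector.
         * Relies on the sector size being a power of two. */
        uint64_t tail = size & ~BDRV_SECTOR_MASK;

        /* New expressions: alignment test and round-down by division,
         * valid for any alignment value, not just powers of two. */
        int aligned_p = QEMU_IS_ALIGNED(size, BDRV_SECTOR_SIZE);
        uint64_t rounded = QEMU_ALIGN_DOWN(size, BDRV_SECTOR_SIZE);

        printf("size=%4llu  is_aligned=%d  align_down=%4llu  tail=%3llu\n",
               (unsigned long long)size, aligned_p,
               (unsigned long long)rounded, (unsigned long long)tail);
    }
    return 0;
}

For a 513-byte iovec, for example, the mask expression yields a 1-byte tail while QEMU_ALIGN_DOWN() yields 512, the largest sector-aligned size.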
@@ -199,7 +201,7 @@ static const AIOCBInfo dma_aiocb_info = {
 };
 
 BlockAIOCB *dma_blk_io(AioContext *ctx,
-                       QEMUSGList *sg, uint64_t offset,
+                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                        DMAIOFunc *io_func, void *io_func_opaque,
                        BlockCompletionFunc *cb,
                        void *opaque, DMADirection dir)

@@ -212,6 +214,7 @@ BlockAIOCB *dma_blk_io(AioContext *ctx,
     dbs->sg = sg;
     dbs->ctx = ctx;
     dbs->offset = offset;
+    dbs->align = align;
     dbs->sg_cur_index = 0;
     dbs->sg_cur_byte = 0;
     dbs->dir = dir;

@@ -234,11 +237,11 @@ BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
 }
 
 BlockAIOCB *dma_blk_read(BlockBackend *blk,
-                         QEMUSGList *sg, uint64_t offset,
+                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_blk_io(blk_get_aio_context(blk),
-                      sg, offset, dma_blk_read_io_func, blk, cb, opaque,
+    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+                      dma_blk_read_io_func, blk, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
 }

@@ -252,11 +255,11 @@ BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
 }
 
 BlockAIOCB *dma_blk_write(BlockBackend *blk,
-                          QEMUSGList *sg, uint64_t offset,
+                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                           void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_blk_io(blk_get_aio_context(blk),
-                      sg, offset, dma_blk_write_io_func, blk, cb, opaque,
+    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
+                      dma_blk_write_io_func, blk, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
 }
hw/block/nvme.c

@@ -258,8 +258,10 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     req->has_sg = true;
     dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
     req->aiocb = is_write ?
-        dma_blk_write(n->conf.blk, &req->qsg, data_offset, nvme_rw_cb, req) :
-        dma_blk_read(n->conf.blk, &req->qsg, data_offset, nvme_rw_cb, req);
+        dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
+                      nvme_rw_cb, req) :
+        dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
+                     nvme_rw_cb, req);
 
     return NVME_NO_COMPLETE;
 }
hw/ide/ahci.c

@@ -1009,6 +1009,7 @@ static void execute_ncq_command(NCQTransferState *ncq_tfs)
                          &ncq_tfs->sglist, BLOCK_ACCT_READ);
         ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
                                       ncq_tfs->lba << BDRV_SECTOR_BITS,
+                                      BDRV_SECTOR_SIZE,
                                       ncq_cb, ncq_tfs);
         break;
     case WRITE_FPDMA_QUEUED:

@@ -1022,6 +1023,7 @@ static void execute_ncq_command(NCQTransferState *ncq_tfs)
                          &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
         ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
                                        ncq_tfs->lba << BDRV_SECTOR_BITS,
+                                       BDRV_SECTOR_SIZE,
                                        ncq_cb, ncq_tfs);
         break;
     default:
hw/ide/core.c

@@ -882,15 +882,15 @@ static void ide_dma_cb(void *opaque, int ret)
     switch (s->dma_cmd) {
     case IDE_DMA_READ:
         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
-                                          ide_dma_cb, s);
+                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
         break;
     case IDE_DMA_WRITE:
         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
-                                           ide_dma_cb, s);
+                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
         break;
     case IDE_DMA_TRIM:
         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
-                                        &s->sg, offset,
+                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                         ide_issue_trim, s->blk, ide_dma_cb, s,
                                         DMA_DIRECTION_TO_DEVICE);
         break;
hw/scsi/scsi-disk.c

@@ -341,6 +341,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
         r->req.resid -= r->req.sg->size;
         r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                   r->req.sg, r->sector << BDRV_SECTOR_BITS,
+                                  BDRV_SECTOR_SIZE,
                                   sdc->dma_readv, r, scsi_dma_complete, r,
                                   DMA_DIRECTION_FROM_DEVICE);
     } else {

@@ -539,6 +540,7 @@ static void scsi_write_data(SCSIRequest *req)
         r->req.resid -= r->req.sg->size;
         r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                   r->req.sg, r->sector << BDRV_SECTOR_BITS,
+                                  BDRV_SECTOR_SIZE,
                                   sdc->dma_writev, r, scsi_dma_complete, r,
                                   DMA_DIRECTION_TO_DEVICE);
     } else {
include/sysemu/dma.h

@@ -199,14 +199,14 @@ typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                               void *opaque);
 
 BlockAIOCB *dma_blk_io(AioContext *ctx,
-                       QEMUSGList *sg, uint64_t offset,
+                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                        DMAIOFunc *io_func, void *io_func_opaque,
                        BlockCompletionFunc *cb, void *opaque, DMADirection dir);
 BlockAIOCB *dma_blk_read(BlockBackend *blk,
-                         QEMUSGList *sg, uint64_t offset,
+                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
 BlockAIOCB *dma_blk_write(BlockBackend *blk,
-                          QEMUSGList *sg, uint64_t offset,
+                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                           BlockCompletionFunc *cb, void *opaque);
 uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
 uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);