xen/blkback: Union the blkif_request request specific fields

Following in the steps of patch:
"xen: Union the blkif_request request specific fields" this patch
changes the blkback. Per the original patch:

"Prepare for extending the block device ring to allow request
specific fields, by moving the request specific fields for
reads, writes and barrier requests to a union member."

Cc: Owen Smith <owen.smith@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
This commit is contained in:
Konrad Rzeszutek Wilk 2011-03-01 16:22:28 -05:00
parent e8e28871ed
commit c35950bfa9
2 changed files with 11 additions and 11 deletions

View File

@@ -426,7 +426,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 	}
 	preq.dev           = req->handle;
-	preq.sector_number = req->sector_number;
+	preq.sector_number = req->u.rw.sector_number;
 	preq.nr_sects      = 0;
 	pending_req->blkif = blkif;
@@ -438,11 +438,11 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 	for (i = 0; i < nseg; i++) {
 		uint32_t flags;
-		seg[i].nsec = req->seg[i].last_sect -
-			req->seg[i].first_sect + 1;
-		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-		    (req->seg[i].last_sect < req->seg[i].first_sect))
+		seg[i].nsec = req->u.rw.seg[i].last_sect -
+			req->u.rw.seg[i].first_sect + 1;
+		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
+		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
 			goto fail_response;
 		preq.nr_sects += seg[i].nsec;
@@ -450,7 +450,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 		if (operation != READ)
 			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-				  req->seg[i].gref, blkif->domid);
+				  req->u.rw.seg[i].gref, blkif->domid);
 	}
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
@@ -472,7 +472,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 			page_to_pfn(blkbk->pending_page(pending_req, i)),
 			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
 		seg[i].buf = map[i].dev_bus_addr |
-			(req->seg[i].first_sect << 9);
+			(req->u.rw.seg[i].first_sect << 9);
 	}
 	if (ret)

View File

@@ -96,12 +96,12 @@ static void inline blkif_get_x86_32_req(struct blkif_request *dst, struct blkif_x86_32_request *src)
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->sector_number = src->sector_number;
+	dst->u.rw.sector_number = src->sector_number;
 	barrier();
 	if (n > dst->nr_segments)
 		n = dst->nr_segments;
 	for (i = 0; i < n; i++)
-		dst->seg[i] = src->seg[i];
+		dst->u.rw.seg[i] = src->seg[i];
 }

 static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_x86_64_request *src)
@@ -111,12 +111,12 @@ static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_x86_64_request *src)
 	dst->nr_segments = src->nr_segments;
 	dst->handle = src->handle;
 	dst->id = src->id;
-	dst->sector_number = src->sector_number;
+	dst->u.rw.sector_number = src->sector_number;
 	barrier();
 	if (n > dst->nr_segments)
 		n = dst->nr_segments;
 	for (i = 0; i < n; i++)
-		dst->seg[i] = src->seg[i];
+		dst->u.rw.seg[i] = src->seg[i];
 }

 #endif /* __XEN_BLKIF_H__ */