xen/blkfront: force data bouncing when backend is untrusted
Split the current bounce buffering logic used with persistent grants into its own option, and allow enabling it independently of persistent grants. This allows the same code paths to be reused to perform the bounce buffering required to avoid leaking contiguous data in shared pages that are not part of the request fragments.

Reporting whether the backend is to be trusted can be done using a module parameter, or from the xenstore frontend path as set by the toolstack when adding the device.

This is CVE-2022-33742, part of XSA-403.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
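The leak the commit message describes is easiest to see outside the kernel: a grant shares a whole page with the backend, so any bytes in that page beyond the request fragment are exposed as well. The user-space sketch below is illustrative only, not driver code; bounce_out is a made-up helper and PAGE_SIZE a stand-in constant. It shows why copying just the fragment into a zeroed page before sharing prevents the backend from seeing neighbouring data.

    /*
     * Illustrative sketch only (not kernel code): a fragment at
     * (offset, len) inside a granted page exposes the rest of that
     * page too; bouncing shares a zeroed copy holding only the
     * fragment instead.
     */
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Stand-in for the page that would be granted to the backend. */
    static unsigned char shared[PAGE_SIZE];

    /* Copy only the fragment into a zeroed bounce page before sharing. */
    static void bounce_out(const unsigned char *data_page,
                           size_t offset, size_t len)
    {
        memset(shared, 0, PAGE_SIZE);               /* no stale neighbours */
        memcpy(shared + offset, data_page + offset, len);
    }

    int main(void)
    {
        unsigned char page[PAGE_SIZE];

        memset(page, 'S', PAGE_SIZE);               /* 'S' = secret bytes */
        memcpy(page + 512, "request-fragment", 16); /* the actual I/O data */

        bounce_out(page, 512, 16);

        /* Outside the fragment the backend only ever sees zeroes. */
        printf("byte before fragment: %#x\n", shared[511]);
        printf("fragment byte:        %c\n", shared[512]);
        return 0;
    }

In the driver the copy runs in both directions: on writes the fragments are copied into the bounce pages before the request is queued (the need_copy hunk below), and on reads blkif_completion copies the returned data back out of the bounce pages.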
@@ -152,6 +152,10 @@ static unsigned int xen_blkif_max_ring_order;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 
+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE(info)	\
 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
 
@@ -210,6 +214,7 @@ struct blkfront_info
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
 	unsigned int feature_persistent:1;
+	unsigned int bounce:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
 	/* Number of 4KB segments handled */
@@ -310,7 +315,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		if (info->feature_persistent) {
+		if (info->bounce) {
 			granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
 			if (!granted_page) {
 				kfree(gnt_list_entry);
@@ -330,7 +335,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 	list_for_each_entry_safe(gnt_list_entry, n,
 	                         &rinfo->grants, node) {
 		list_del(&gnt_list_entry->node);
-		if (info->feature_persistent)
+		if (info->bounce)
 			__free_page(gnt_list_entry->page);
 		kfree(gnt_list_entry);
 		i--;
@@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (info->feature_persistent)
+	if (info->bounce)
 		grant_foreign_access(gnt_list_entry, info);
 	else {
 		/* Grant access to the GFN passed by the caller */
@@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (!info->feature_persistent) {
+	if (!info->bounce) {
 		struct page *indirect_page;
 
 		/* Fetch a pre-allocated page to use for indirect grefs */
@@ -703,7 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		.grant_idx = 0,
 		.segments = NULL,
 		.rinfo = rinfo,
-		.need_copy = rq_data_dir(req) && info->feature_persistent,
+		.need_copy = rq_data_dir(req) && info->bounce,
 	};
 
 	/*
@@ -981,11 +986,12 @@ static void xlvbd_flush(struct blkfront_info *info)
 {
 	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
 			      info->feature_fua ? true : false);
-	pr_info("blkfront: %s: %s %s %s %s %s\n",
+	pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
 		info->gd->disk_name, flush_info(info),
 		"persistent grants:", info->feature_persistent ?
 		"enabled;" : "disabled;", "indirect descriptors:",
-		info->max_indirect_segments ? "enabled;" : "disabled;");
+		info->max_indirect_segments ? "enabled;" : "disabled;",
+		"bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }
 
 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1207,7 +1213,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 	if (!list_empty(&rinfo->indirect_pages)) {
 		struct page *indirect_page, *n;
 
-		BUG_ON(info->feature_persistent);
+		BUG_ON(info->bounce);
 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
 			list_del(&indirect_page->lru);
 			__free_page(indirect_page);
@@ -1224,7 +1230,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 							  NULL);
 				rinfo->persistent_gnts_c--;
 			}
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(persistent_gnt->page);
 			kfree(persistent_gnt);
 		}
@@ -1245,7 +1251,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = rinfo->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, NULL);
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(persistent_gnt->page);
 			kfree(persistent_gnt);
 		}
@@ -1428,7 +1434,7 @@ static int blkif_completion(unsigned long *id,
 	data.s = s;
 	num_sg = s->num_sg;
 
-	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+	if (bret->operation == BLKIF_OP_READ && info->bounce) {
 		for_each_sg(s->sg, sg, num_sg, i) {
 			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
@@ -1487,7 +1493,7 @@ static int blkif_completion(unsigned long *id,
 			 * Add the used indirect page back to the list of
 			 * available pages for indirect grefs.
 			 */
-			if (!info->feature_persistent) {
+			if (!info->bounce) {
 				indirect_page = s->indirect_grants[i]->page;
 				list_add(&indirect_page->lru, &rinfo->indirect_pages);
 			}
@@ -1764,6 +1770,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	if (!info)
 		return -ENODEV;
 
+	/* Check if backend is trusted. */
+	info->bounce = !xen_blkif_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 					      "max-ring-page-order", 0);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2173,10 +2183,10 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	if (err)
 		goto out_of_memory;
 
-	if (!info->feature_persistent && info->max_indirect_segments) {
+	if (!info->bounce && info->max_indirect_segments) {
 		/*
-		 * We are using indirect descriptors but not persistent
-		 * grants, we need to allocate a set of pages that can be
+		 * We are using indirect descriptors but don't have a bounce
+		 * buffer, we need to allocate a set of pages that can be
 		 * used for mapping indirect grefs
 		 */
 		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
@@ -2277,6 +2287,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	info->feature_persistent =
 		!!xenbus_read_unsigned(info->xbdev->otherend,
 				       "feature-persistent", 0);
+	if (info->feature_persistent)
+		info->bounce = true;
 
 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
@@ -2548,6 +2560,13 @@ static void blkfront_delay_work(struct work_struct *work)
 	struct blkfront_info *info;
 	bool need_schedule_work = false;
 
+	/*
+	 * Note that when using bounce buffers but not persistent grants
+	 * there's no need to run blkfront_delay_work because grants are
+	 * revoked in blkif_completion or else an error is reported and the
+	 * connection is closed.
+	 */
+
 	mutex_lock(&blkfront_mutex);
 
 	list_for_each_entry(info, &info_list, info_list) {
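As a closing note on the trust decision itself: per the talk_to_blkback hunk, bouncing is engaged when the trusted module parameter is cleared or when the toolstack writes trusted = 0 into the device's frontend xenstore path, with an absent node treated as trusted (the default of 1 in the xenbus_read_unsigned call). A stand-alone sketch of that predicate, with must_bounce a hypothetical name and xenstore_trusted standing in for the xenstore lookup:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the commit's decision in talk_to_blkback():
     *   info->bounce = !xen_blkif_trusted ||
     *                  !xenbus_read_unsigned(dev->nodename, "trusted", 1);
     */
    static bool must_bounce(bool module_trusted, unsigned int xenstore_trusted)
    {
        return !module_trusted || !xenstore_trusted;
    }

    int main(void)
    {
        /* xenstore_trusted defaults to 1 when the node is absent. */
        printf("%d\n", must_bounce(true, 1));  /* 0: trusted, no bouncing */
        printf("%d\n", must_bounce(true, 0));  /* 1: toolstack marked untrusted */
        printf("%d\n", must_bounce(false, 1)); /* 1: module param distrusts all */
        return 0;
    }

Independently of this predicate, enabling persistent grants also forces info->bounce on (the blkfront_gather_backend_features hunk), since the persistent-grant path has always bounced data through its granted pages.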