x86/intel_rdt: Prevent pseudo-locking from using stale pointers

When the last CPU in an rdt_domain goes offline, its rdt_domain struct gets
freed. Current pseudo-locking code is unaware of this scenario and tries to
dereference the freed structure in a few places.

Add checks to prevent pseudo-locking code from doing this.

While further work is needed to seamlessly restore resource groups (not
just pseudo-locking) to their configuration when the domain is brought back
online, the immediate issue of invalid pointers is addressed here.

Fixes: f4e80d67a5 ("x86/intel_rdt: Resctrl files reflect pseudo-locked information")
Fixes: 443810fe61 ("x86/intel_rdt: Create debugfs files for pseudo-locking testing")
Fixes: 746e08590b ("x86/intel_rdt: Create character device exposing pseudo-locked region")
Fixes: 33dc3e410a ("x86/intel_rdt: Make CPU information accessible for pseudo-locked regions")
Signed-off-by: Jithu Joseph <jithu.joseph@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: fenghua.yu@intel.com
Cc: tony.luck@intel.com
Cc: gavin.hindman@intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/231f742dbb7b00a31cc104416860e27dba6b072d.1539384145.git.reinette.chatre@intel.com
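
For orientation before the diff: the invariant the patch introduces is that the domain-offline path severs the pseudo_lock_region's back-pointer, and every consumer checks that pointer before dereferencing it. Below is a minimal sketch of that pattern, distilled from the diff; the struct layouts are trimmed to the relevant fields and domain_teardown()/plr_show() are hypothetical stand-ins for domain_remove_cpu() and the resctrl file handlers (which, in the real code, are serialized by rdtgroup_mutex):

	#include <errno.h>
	#include <stddef.h>

	struct pseudo_lock_region;

	struct rdt_domain {
		struct pseudo_lock_region *plr;	/* set while a region is pseudo-locked */
		/* ... */
	};

	struct pseudo_lock_region {
		struct rdt_domain *d;	/* NULL once the cache domain goes offline */
		/* ... */
	};

	/* Offline path: sever the back-pointer before "d" is freed. */
	static void domain_teardown(struct rdt_domain *d)
	{
		if (d->plr)
			d->plr->d = NULL;
		/* kfree() of the domain follows in the real code */
	}

	/* Consumer side: check the pointer before dereferencing it. */
	static int plr_show(struct pseudo_lock_region *plr)
	{
		if (!plr->d)
			return -ENODEV;	/* reported as "Cache domain offline" */
		/* plr->d is safe to use here (both sides hold rdtgroup_mutex) */
		return 0;
	}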

arch/x86/kernel/cpu/intel_rdt.c

@@ -608,6 +608,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 			cancel_delayed_work(&d->cqm_limbo);
 		}
 
+		/*
+		 * rdt_domain "d" is going to be freed below, so clear
+		 * its pointer from pseudo_lock_region struct.
+		 */
+		if (d->plr)
+			d->plr->d = NULL;
+
 		kfree(d->ctrl_val);
 		kfree(d->mbps_val);
 		bitmap_free(d->rmid_busy_llc);

arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c

@@ -404,8 +404,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			for_each_alloc_enabled_rdt_resource(r)
 				seq_printf(s, "%s:uninitialized\n", r->name);
 		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-			seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
-				   rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				seq_printf(s, "%s:%d=%x\n",
+					   rdtgrp->plr->r->name,
+					   rdtgrp->plr->d->id,
+					   rdtgrp->plr->cbm);
+			}
 		} else {
 			closid = rdtgrp->closid;
 			for_each_alloc_enabled_rdt_resource(r) {

arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c

@@ -1174,6 +1174,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 		goto out;
 	}
 
+	if (!plr->d) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	plr->thread_done = 0;
 	cpu = cpumask_first(&plr->d->cpu_mask);
 	if (!cpu_online(cpu)) {
@@ -1494,6 +1499,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	plr = rdtgrp->plr;
 
+	if (!plr->d) {
+		mutex_unlock(&rdtgroup_mutex);
+		return -ENODEV;
+	}
+
 	/*
 	 * Task is required to run with affinity to the cpus associated
 	 * with the pseudo-locked region. If this is not the case the task

arch/x86/kernel/cpu/intel_rdt_rdtgroup.c

@@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 			      struct seq_file *s, void *v)
 {
 	struct rdtgroup *rdtgrp;
+	struct cpumask *mask;
 	int ret = 0;
 
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 
 	if (rdtgrp) {
-		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
-			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-				   cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
-		else
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				mask = &rdtgrp->plr->d->cpu_mask;
+				seq_printf(s, is_cpu_list(of) ?
+					   "%*pbl\n" : "%*pb\n",
+					   cpumask_pr_args(mask));
+			}
+		} else {
 			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
 				   cpumask_pr_args(&rdtgrp->cpu_mask));
+		}
 	} else {
 		ret = -ENOENT;
 	}
@@ -1282,6 +1292,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	struct rdt_resource *r;
 	struct rdt_domain *d;
 	unsigned int size;
+	int ret = 0;
 	bool sep;
 	u32 ctrl;
 
@@ -1292,11 +1303,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	}
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-		seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
-		size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
-					    rdtgrp->plr->d,
-					    rdtgrp->plr->cbm);
-		seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		if (!rdtgrp->plr->d) {
+			rdt_last_cmd_clear();
+			rdt_last_cmd_puts("Cache domain offline\n");
+			ret = -ENODEV;
+		} else {
+			seq_printf(s, "%*s:", max_name_width,
+				   rdtgrp->plr->r->name);
+			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+						    rdtgrp->plr->d,
+						    rdtgrp->plr->cbm);
+			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		}
 		goto out;
 	}
@@ -1326,7 +1344,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 out:
 	rdtgroup_kn_unlock(of->kn);
 
-	return 0;
+	return ret;
 }
 
 /* rdtgroup information files for one cache resource. */