Merge tag 'x86_cache_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 resource control updates from Borislav Petkov:

 "Avoid IPI-ing a task in certain cases and prevent load/store tearing
  when accessing a task's resctrl fields concurrently"

* tag 'x86_cache_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Apply READ_ONCE/WRITE_ONCE to task_struct.{rmid,closid}
  x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
  x86/resctrl: Add printf attribute to log function
commit b0fb29382d
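Background on the tearing fix: task_struct->{closid,rmid} are read on every context
switch in __resctrl_sched_in() without any lock, while another CPU may be rewriting
them from rdtgroup code. Plain C accesses let the compiler refetch, fuse, or split
such loads and stores. A minimal sketch of the read-side hazard, using a hypothetical
field "id" (READ_ONCE() comes from linux/compiler.h):

/* Sketch only: what a plain racy load permits that READ_ONCE() forbids. */
struct example {
        unsigned int id;
};

unsigned int plain_read(struct example *e)
{
        /*
         * The compiler may reload e->id between the test and the use,
         * so a concurrent writer can make this return 0 even though
         * the test saw a non-zero value.
         */
        if (e->id)
                return e->id;
        return 0;
}

unsigned int safe_read(struct example *e)
{
        /* One load; the test and the result use the same snapshot. */
        unsigned int tmp = READ_ONCE(e->id);

        return tmp ? tmp : 0;
}

This is exactly the shape of the __resctrl_sched_in() change in the first hunk
below: read once into tmp, then test and use tmp.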
@@ -56,19 +56,22 @@ static void __resctrl_sched_in(void)
 	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
+	u32 tmp;
 
 	/*
 	 * If this task has a closid/rmid assigned, use it.
 	 * Else use the closid/rmid assigned to this cpu.
 	 */
 	if (static_branch_likely(&rdt_alloc_enable_key)) {
-		if (current->closid)
-			closid = current->closid;
+		tmp = READ_ONCE(current->closid);
+		if (tmp)
+			closid = tmp;
 	}
 
 	if (static_branch_likely(&rdt_mon_enable_key)) {
-		if (current->rmid)
-			rmid = current->rmid;
+		tmp = READ_ONCE(current->rmid);
+		if (tmp)
+			rmid = tmp;
 	}
 
 	if (closid != state->cur_closid || rmid != state->cur_rmid) {
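For orientation: __resctrl_sched_in() runs on every context switch, behind a
static-branch wrapper in the same header, which is why the fix has to stay free
of locks and barriers. Roughly (quoted from memory of the surrounding code, so
treat as a sketch):

static inline void resctrl_sched_in(void)
{
        /* Patched to a no-op unless resctrl is in use on this system. */
        if (static_branch_likely(&rdt_enable_key))
                __resctrl_sched_in();
}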
@@ -572,6 +572,7 @@ union cpuid_0x10_x_edx {
 
 void rdt_last_cmd_clear(void);
 void rdt_last_cmd_puts(const char *s);
+__printf(1, 2)
 void rdt_last_cmd_printf(const char *fmt, ...);
 
 void rdt_ctrl_update(void *arg);
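The new __printf(1, 2) annotation is the kernel's wrapper around the compiler's
format attribute: argument 1 is a printf-style format string and the variadic
arguments start at position 2, so every call site gets type-checked at build
time. A minimal sketch of the effect, with a hypothetical logger:

/* In the kernel, __printf(a, b) expands to __attribute__((__format__(printf, a, b))). */
__printf(1, 2)
void my_log(const char *fmt, ...);

void example(void)
{
        my_log("closid %u\n", 5u);      /* fine */
        my_log("closid %s\n", 5u);      /* now a compile-time warning: %s vs. unsigned int */
}

With the attribute in place, a mismatched format string passed to
rdt_last_cmd_printf() is flagged by the compiler instead of misbehaving at run time.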
@@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 	 */
 
 	if (rdtgrp->type == RDTCTRL_GROUP) {
-		tsk->closid = rdtgrp->closid;
-		tsk->rmid = rdtgrp->mon.rmid;
+		WRITE_ONCE(tsk->closid, rdtgrp->closid);
+		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
 	} else if (rdtgrp->type == RDTMON_GROUP) {
 		if (rdtgrp->mon.parent->closid == tsk->closid) {
-			tsk->rmid = rdtgrp->mon.rmid;
+			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
 		} else {
 			rdt_last_cmd_puts("Can't move task to different control group\n");
 			return -EINVAL;
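The WRITE_ONCE() stores above are the other half of the READ_ONCE() loads in
__resctrl_sched_in(): they stop the compiler from splitting or deferring the
store, so a concurrent reader sees either the old or the new 32-bit value,
never a torn mix. Schematically (new_closid is a placeholder, not a variable
in this code):

/* Writer, e.g. __rdtgroup_move_task(): publish the new ID in one store. */
WRITE_ONCE(tsk->closid, new_closid);

/* Reader, context-switch path: take one consistent snapshot. */
tmp = READ_ONCE(current->closid);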
@@ -2310,22 +2310,18 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
 	for_each_process_thread(p, t) {
 		if (!from || is_closid_match(t, from) ||
 		    is_rmid_match(t, from)) {
-			t->closid = to->closid;
-			t->rmid = to->mon.rmid;
+			WRITE_ONCE(t->closid, to->closid);
+			WRITE_ONCE(t->rmid, to->mon.rmid);
 
-#ifdef CONFIG_SMP
 			/*
-			 * This is safe on x86 w/o barriers as the ordering
-			 * of writing to task_cpu() and t->on_cpu is
-			 * reverse to the reading here. The detection is
-			 * inaccurate as tasks might move or schedule
-			 * before the smp function call takes place. In
-			 * such a case the function call is pointless, but
+			 * If the task is on a CPU, set the CPU in the mask.
+			 * The detection is inaccurate as tasks might move or
+			 * schedule before the smp function call takes place.
+			 * In such a case the function call is pointless, but
 			 * there is no other side effect.
 			 */
-			if (mask && t->on_cpu)
+			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
 				cpumask_set_cpu(task_cpu(t), mask);
-#endif
 		}
 	}
 	read_unlock(&tasklist_lock);
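On this last hunk: t->on_cpu remains set while a task is still being switched
out, so during a context switch both the outgoing and incoming task can look
"on CPU", and testing it could IPI a CPU the task has already left. task_curr()
instead asks whether the task is what its CPU is running right now; its
scheduler definition is essentially:

/* kernel/sched/core.c (paraphrased): is this task executing on a CPU now? */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

Switching from #ifdef CONFIG_SMP to IS_ENABLED(CONFIG_SMP) also lets the
compiler discard the whole branch on UP builds while keeping the code
compile-tested in all configurations.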