#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "../perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches         */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}

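/*
 * Translate the perf exclude_host/exclude_guest attributes into the
 * AMD Host-only/Guest-only event select bits.
 */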
static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

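/*
 * NB events are recognized from the low byte of the event select:
 * anything in the 0xEx/0xFx range (bits 7:5 all set) is treated as a
 * northbridge event.
 */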
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return -ENOMEM;

	return 0;
}

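/*
 * If another online CPU sits on the same northbridge, adopt its shared
 * amd_nb (bumping the refcount) and queue the private copy allocated in
 * amd_pmu_cpu_prepare() for freeing once this CPU is fully online.
 */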
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

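/*
 * Drop this CPU's reference on the shared amd_nb and free it when the
 * last CPU on the node goes away.
 */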
static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (!x86_pmu.amd_nb_constraints)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

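/*
 * Raw event config layout exported through the PMU's sysfs "format"
 * directory: event select, unit mask, edge, invert and counter mask.
 */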
PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

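/*
 * The masks below encode which PERF_CTL counters an event may use:
 * amd_f15_PMC20 is PERF_CTL[2:0], amd_f15_PMC53 is PERF_CTL[5:3] and
 * amd_f15_PMC30 is PERF_CTL[3,0] (an overlapping constraint, hence
 * EVENT_CONSTRAINT_OVERLAP).
 */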
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to perf_event_amd_uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

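/*
 * Rebuild the full 12-bit event select for sysfs: bits 7:0 come straight
 * from the config, the extended bits 11:8 live in config bits 35:32 and
 * are shifted down by 24.
 */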
static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};

static int __init amd_core_pmu_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	switch (boot_cpu_data.x86) {
	case 0x15:
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
		break;
	case 0x17:
		pr_cont("Fam17h ");
		/*
		 * In family 17h, there are no event constraints in the PMC hardware.
		 * We fall back to using the default amd_get_event_constraints.
		 */
		break;
	default:
		pr_err("core perfctr but no constraints; unknown hardware!\n");
		return -ENODEV;
	}

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	pr_cont("core perfctr, ");
	return 0;
}

__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

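/*
 * Used when SVM is enabled: stop masking out the Host-only bit so the
 * HO/GO bits set up by amd_core_hw_config() take effect, then reprogram
 * all events to pick up the new mask.
 */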
void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	x86_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);