perf: set event period
Change-Id: Ibf569de7af8697e766c10b8d70905b8cdc4df083
@@ -232,3 +232,43 @@ int hw_perf_event_init(struct mc_perf_event *event)
        }
        return 0;
}

int ihk_mc_event_set_period(struct mc_perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int64_t left = ihk_atomic64_read(&hwc->period_left);
        int64_t period = hwc->sample_period;
        uint64_t max_period;
        int ret = 0;

        max_period = arm_pmu_event_max_period(event);
        if (unlikely(left <= -period)) {
                left = period;
                ihk_atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                ihk_atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (max_period >> 1))
                left = (max_period >> 1);

        ihk_atomic64_set(&hwc->prev_count, (uint64_t)-left);

        cpu_pmu.write_counter(event->counter_id,
                              (uint64_t)(-left) & max_period);

        return ret;
}
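
The arming trick in this hunk: the PMU counter counts up, so writing the two's complement of the remaining period makes it overflow (and raise the sampling interrupt) after exactly `left` events, and clamping `left` to half the counter range leaves headroom for interrupt latency. A minimal standalone sketch of that arithmetic, assuming a hypothetical 32-bit counter width; the names and constants below are illustrative, not McKernel API:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Hypothetical stand-ins for hwc->sample_period and the counter width. */
#define SAMPLE_PERIOD  100000
#define MAX_PERIOD     0xFFFFFFFFULL    /* assumed 32-bit PMU counter */

int main(void)
{
        int64_t left = SAMPLE_PERIOD;

        /* Clamp to half the counter range, mirroring the hunk above. */
        if (left > (int64_t)(MAX_PERIOD >> 1))
                left = (int64_t)(MAX_PERIOD >> 1);

        /*
         * Program the two's complement: the counter counts up from this
         * value and wraps (overflows) after exactly 'left' events.
         */
        uint64_t programmed = (uint64_t)(-left) & MAX_PERIOD;

        printf("left               = %" PRId64 "\n", left);
        printf("programmed         = 0x%08" PRIx64 "\n", programmed);
        printf("events to overflow = %" PRIu64 "\n",
               (MAX_PERIOD + 1) - programmed);
        return 0;
}

With SAMPLE_PERIOD = 100000 this prints programmed = 0xfffe7960, i.e. exactly 100000 counts below the wrap point.
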
@@ -788,6 +788,7 @@ static void armv8pmu_handle_irq(void *priv)
        long irqstate;
        struct mckfd *fdp;
        struct pt_regs *regs = (struct pt_regs *)priv;
        struct mc_perf_event *event = NULL;

        /*
         * Get and reset the IRQ flags

@@ -821,6 +822,11 @@ static void armv8pmu_handle_irq(void *priv)
        else {
                set_signal(SIGIO, regs, NULL);
        }

        if (event) {
                ihk_mc_event_set_period(event);
        }
        return;
}

static void armv8pmu_enable_user_access_pmu_regs(void)
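
In the IRQ handler above, the overflow interrupt first delivers SIGIO and then re-arms the counter through ihk_mc_event_set_period(), so the next sample fires one period later. A self-contained sketch that simulates this arm/overflow/re-arm cycle; the counter, handler, and period bookkeeping are all mocked stand-ins (only the left <= 0 re-arm branch is modeled), not McKernel code:

#include <stdio.h>
#include <stdint.h>

#define MAX_PERIOD 0xFFFFFFFFULL        /* assumed 32-bit PMU counter */

static uint64_t counter;                /* mocked counter register */
static int64_t period_left;             /* mirrors hwc->period_left */

/* Mocked re-arm, modeled on ihk_mc_event_set_period() above. */
static void set_period(int64_t sample_period)
{
        int64_t left = period_left;

        if (left <= 0) {                /* previous period elapsed */
                left += sample_period;
                period_left = left;
        }
        counter = (uint64_t)(-left) & MAX_PERIOD;
}

int main(void)
{
        const int64_t sample_period = 1000;

        period_left = sample_period;
        set_period(sample_period);      /* initial arm */

        for (int sample = 0; sample < 3; sample++) {
                /* Events until the counter wraps (the overflow IRQ). */
                uint64_t events = (MAX_PERIOD + 1) - counter;

                printf("sample %d after %llu events\n",
                       sample, (unsigned long long)events);

                /* Handler: deliver SIGIO (omitted here), then re-arm. */
                period_left -= (int64_t)events;
                set_period(sample_period);
        }
        return 0;
}
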
@@ -508,3 +508,8 @@ int hw_perf_event_init(struct mc_perf_event *event)
{
        return 0;
}

int ihk_mc_event_set_period(struct mc_perf_event *event)
{
        return 0;
}

@@ -4068,6 +4068,7 @@ void perf_start(struct mc_perf_event *event)
                leader->base_system_tsc = thread->system_tsc;
        }
        else {
                ihk_mc_event_set_period(leader);
                perf_counter_set(leader);
                counter_mask |= 1UL << counter_id;
        }

@@ -4097,6 +4098,7 @@ void perf_start(struct mc_perf_event *event)
                sub->base_system_tsc = thread->system_tsc;
        }
        else {
                ihk_mc_event_set_period(sub);
                perf_counter_set(sub);
                counter_mask |= 1UL << counter_id;
        }
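
Both perf_start() hunks follow the same pattern: arm the period first, then program the counter with perf_counter_set(), and record the slot in counter_mask so the group can be enabled together. A hedged sketch of that bookkeeping with mocked functions; the counter IDs and the enable step are illustrative assumptions, not the real McKernel calls:

#include <stdio.h>
#include <stdint.h>

/* Mock stand-ins for the real McKernel calls. */
static void mock_set_period(const char *name)
{
        printf("%s: period armed\n", name);        /* ihk_mc_event_set_period() */
}

static void mock_counter_set(const char *name)
{
        printf("%s: counter programmed\n", name);  /* perf_counter_set() */
}

int main(void)
{
        uint64_t counter_mask = 0;
        const char *events[] = { "leader", "sub" };
        int counter_ids[] = { 2, 3 };              /* hypothetical slots */

        for (int i = 0; i < 2; i++) {
                /* Same ordering as the hunks: arm, program, record. */
                mock_set_period(events[i]);
                mock_counter_set(events[i]);
                counter_mask |= 1UL << counter_ids[i];
        }

        /* The accumulated mask enables the whole group at once. */
        printf("counter_mask = 0x%llx\n", (unsigned long long)counter_mask);
        return 0;
}
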
@@ -91,6 +91,7 @@ unsigned long ihk_mc_hw_cache_extra_reg_map(unsigned long hw_cache_event);
unsigned long ihk_mc_raw_event_map(unsigned long raw_event);
int ihk_mc_validate_event(unsigned long hw_config);
int hw_perf_event_init(struct mc_perf_event *event);
int ihk_mc_event_set_period(struct mc_perf_event *event);

static inline int is_sampling_event(struct mc_perf_event *event)
{