diff --git a/lisa/_assets/kmodules/lisa/ftrace_events.h b/lisa/_assets/kmodules/lisa/ftrace_events.h
index 78897a3d0d7d79f6af5eda80731ad7669251183e..802d211cf96c1ebb36a09622a84e5252fe8700a1 100644
--- a/lisa/_assets/kmodules/lisa/ftrace_events.h
+++ b/lisa/_assets/kmodules/lisa/ftrace_events.h
@@ -312,15 +312,24 @@ TRACE_EVENT_CONDITION(lisa__uclamp_util_se,
 		__entry->cpu        = rq_cpu(rq);
 		__entry->util_avg   = p->se.avg.util_avg;
 		__entry->uclamp_avg = uclamp_rq_util_with(rq, p->se.avg.util_avg);
+
+# if HAS_KERNEL_FEATURE(CFS_UCLAMP)
 		__entry->uclamp_min = rq->uclamp[UCLAMP_MIN].value;
 		__entry->uclamp_max = rq->uclamp[UCLAMP_MAX].value;
+# endif
 	),
 
-	TP_printk("pid=%d comm=%s cpu=%d util_avg=%lu uclamp_avg=%lu "
-		  "uclamp_min=%lu uclamp_max=%lu",
+	TP_printk("pid=%d comm=%s cpu=%d util_avg=%lu uclamp_avg=%lu"
+# if HAS_KERNEL_FEATURE(CFS_UCLAMP)
+		  " uclamp_min=%lu uclamp_max=%lu"
+# endif
+		  ,
 		  __entry->pid, __entry->comm, __entry->cpu,
-		  __entry->util_avg, __entry->uclamp_avg,
-		  __entry->uclamp_min, __entry->uclamp_max)
+		  __entry->util_avg, __entry->uclamp_avg
+# if HAS_KERNEL_FEATURE(CFS_UCLAMP)
+		  , __entry->uclamp_min, __entry->uclamp_max
+# endif
+		  )
 );
 #else
 #define trace_lisa__uclamp_util_se(is_task, p, rq) while(false) {}
@@ -372,23 +381,18 @@ TRACE_EVENT(lisa__sched_cpu_capacity,
 	TP_STRUCT__entry(
 		__field( int, cpu )
 		__field( unsigned long, capacity )
-		__field( unsigned long, capacity_orig )
 #if HAS_KERNEL_FEATURE(FREQ_INVARIANCE)
+		__field( unsigned long, capacity_orig )
 		__field( unsigned long, capacity_curr )
 #endif
 	),
 
-	unsigned long scale_cpu = rq->cpu_capacity_orig;
-#if HAS_KERNEL_FEATURE(FREQ_INVARIANCE)
-	unsigned long scale_freq = arch_scale_freq_capacity(rq->cpu);
-#endif
-
 	TP_fast_assign(
 		__entry->cpu      = rq->cpu;
 		__entry->capacity = rq->cpu_capacity;
-		__entry->capacity_orig = scale_cpu;
 #if HAS_KERNEL_FEATURE(FREQ_INVARIANCE)
-		__entry->capacity_curr = cap_scale(scale_cpu, scale_freq);
+		__entry->capacity_orig = rq_cpu_orig_capacity(rq);
+		__entry->capacity_curr = rq_cpu_current_capacity(rq);
 #endif
 	),
 
diff --git a/lisa/_assets/kmodules/lisa/kernel_features.json b/lisa/_assets/kmodules/lisa/kernel_features.json
index 8525bca7d7ee832d17b85b842f452293b7926531..89ea45d537f58c414c22d0607d636a510028e68d 100644
--- a/lisa/_assets/kmodules/lisa/kernel_features.json
+++ b/lisa/_assets/kmodules/lisa/kernel_features.json
@@ -22,5 +22,5 @@
     "SCHED_AVG_RBL": "HAS_MEMBER(struct, sched_avg, runnable_load_avg) || HAS_MEMBER(struct, sched_avg, runnable_avg)",
     "FILE_IO": "HAS_SYMBOL(kernel_read) && HAS_SYMBOL(kernel_write) && HAS_SYMBOL(filp_open)",
-    "FREQ_INVARIANCE": "HAS_SYMBOL(arch_freq_scale)"
+    "FREQ_INVARIANCE": "HAS_SYMBOL(arch_freq_scale) && HAS_MEMBER(struct, rq, cpu_capacity_orig)"
 }
diff --git a/lisa/_assets/kmodules/lisa/sched_helpers.h b/lisa/_assets/kmodules/lisa/sched_helpers.h
index 48a90557f8373eed7c07c1a2de494801c2d8d449..db58ec19ad6a55c8ec4ef7836a7e1dbb1ecc7040 100644
--- a/lisa/_assets/kmodules/lisa/sched_helpers.h
+++ b/lisa/_assets/kmodules/lisa/sched_helpers.h
@@ -79,7 +79,7 @@ static int autogroup_path(struct task_group *tg, char *buf, int buflen)
 static __always_inline
 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util)
 {
-# if HAS_KERNEL_FEATURE(SE_UCLAMP)
+# if HAS_KERNEL_FEATURE(CFS_UCLAMP)
 	unsigned long min_util;
 	unsigned long max_util;
 
@@ -200,13 +200,43 @@ static inline int rq_cpu(struct rq *rq)
 
 static inline int rq_cpu_capacity(struct rq *rq)
 {
-	return rq ?
-#if HAS_KERNEL_FEATURE(RQ_CAPACITY)
+	return
+# if HAS_KERNEL_FEATURE(RQ_CAPACITY)
 		rq->cpu_capacity
 # else
 		SCHED_CAPACITY_SCALE
 # endif
-		: -1;
+	;
+}
+
+static inline int rq_cpu_orig_capacity(struct rq *rq)
+{
+	return
+# if HAS_KERNEL_FEATURE(FREQ_INVARIANCE)
+		rq->cpu_capacity_orig
+# else
+		rq_cpu_capacity(rq)
+# endif
+	;
+}
+
+# if HAS_KERNEL_FEATURE(FREQ_INVARIANCE)
+DECLARE_PER_CPU(unsigned long, arch_freq_scale);
+# endif
+
+static inline int rq_cpu_current_capacity(struct rq *rq)
+{
+	return
+# if HAS_KERNEL_FEATURE(FREQ_INVARIANCE)
+		({
+			unsigned long capacity_orig = rq_cpu_orig_capacity(rq);
+			unsigned long scale_freq = per_cpu(arch_freq_scale, rq->cpu);
+			cap_scale(capacity_orig, scale_freq);
+		})
+# else
+		rq_cpu_orig_capacity(rq)
+# endif
+	;
 }
 
 static inline int rq_nr_running(struct rq *rq)