diff --git a/lisa/_assets/kmodules/sched_tp/sched_events.h b/lisa/_assets/kmodules/sched_tp/sched_events.h
index 4bb0156afe318aae105cec5604b62d373a602f25..3e77f4ce42cee73dc1a801ee8e895e886b5f1391 100644
--- a/lisa/_assets/kmodules/sched_tp/sched_events.h
+++ b/lisa/_assets/kmodules/sched_tp/sched_events.h
@@ -148,6 +148,7 @@ TRACE_EVENT(sched_overutilized,
 		  __entry->overutilized, __entry->span)
 );
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)
 TRACE_EVENT(sched_update_nr_running,
 
 	    TP_PROTO(int cpu, int change, unsigned int nr_running),
@@ -227,6 +228,7 @@ TRACE_EVENT(sched_util_est_cfs,
 		  __entry->cpu, __entry->path, __entry->enqueued, __entry->ewma,
 		  __entry->util)
 );
+#endif
 
 #ifdef CONFIG_UCLAMP_TASK
 
diff --git a/lisa/_assets/kmodules/sched_tp/sched_tp.c b/lisa/_assets/kmodules/sched_tp/sched_tp.c
index f7b7fc70993533eb103064e3945e3c6078cb32ca..11880d4c5a7b0b31d4691fd39685e101a4948667 100644
--- a/lisa/_assets/kmodules/sched_tp/sched_tp.c
+++ b/lisa/_assets/kmodules/sched_tp/sched_tp.c
@@ -138,6 +138,7 @@ static void sched_overutilized(void *data, struct root_domain *rd, bool overuti
 	}
 }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)
 static void sched_update_nr_running(void *data, struct rq *rq, int change)
 {
 	if (trace_sched_update_nr_running_enabled()) {
@@ -159,6 +160,7 @@ static void sched_util_est_se(void *data, struct sched_entity *se)
 	if (trace_sched_util_est_se_enabled())
 		_trace_se(se, trace_sched_util_est_se);
 }
+#endif
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0) && (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
 static void sched_cpu_capacity(void *data, struct rq *rq)
@@ -175,9 +177,11 @@ static int sched_tp_init(void)
 	register_trace_pelt_irq_tp(sched_pelt_irq, NULL);
 	register_trace_pelt_se_tp(sched_pelt_se, NULL);
 	register_trace_sched_overutilized_tp(sched_overutilized, NULL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)
 	register_trace_sched_update_nr_running_tp(sched_update_nr_running, NULL);
 	register_trace_sched_util_est_cfs_tp(sched_util_est_cfs, NULL);
 	register_trace_sched_util_est_se_tp(sched_util_est_se, NULL);
+#endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0) && (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
 	register_trace_sched_cpu_capacity_tp(sched_cpu_capacity, NULL);
 #endif
@@ -193,9 +197,11 @@ static void sched_tp_exit(void)
 	unregister_trace_pelt_irq_tp(sched_pelt_irq, NULL);
 	unregister_trace_pelt_se_tp(sched_pelt_se, NULL);
 	unregister_trace_sched_overutilized_tp(sched_overutilized, NULL);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)
 	unregister_trace_sched_update_nr_running_tp(sched_update_nr_running, NULL);
 	unregister_trace_sched_util_est_cfs_tp(sched_util_est_cfs, NULL);
 	unregister_trace_sched_util_est_se_tp(sched_util_est_se, NULL);
+#endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0) && (defined(CONFIG_ARM64) || defined(CONFIG_ARM))
 	unregister_trace_sched_cpu_capacity_tp(sched_cpu_capacity, NULL);
 #endif