diff --git a/lisa/_assets/kmodules/lisa/ftrace_events.h b/lisa/_assets/kmodules/lisa/ftrace_events.h index 6282d205efc26bf6d71ad49dd1a2bba0ede16255..fefa34bc25e59e427d393fb475d4f889a2a169df 100644 --- a/lisa/_assets/kmodules/lisa/ftrace_events.h +++ b/lisa/_assets/kmodules/lisa/ftrace_events.h @@ -35,14 +35,14 @@ TRACE_EVENT(lisa__sched_pelt_cfs, TP_ARGS(cpu, path, avg), TP_STRUCT__entry( - __field( int, cpu ) - __array( char, path, PATH_SIZE ) - __field( unsigned long, load ) + __field( unsigned long long, update_time ) #if HAS_KERNEL_FEATURE(SCHED_AVG_RBL) __field( unsigned long, RBL_LOAD_ENTRY ) #endif __field( unsigned long, util ) - __field( unsigned long long, update_time ) + __field( unsigned long, load ) + __field( int, cpu ) + __array( char, path, PATH_SIZE ) ), TP_fast_assign( @@ -79,13 +79,13 @@ DECLARE_EVENT_CLASS(lisa__sched_pelt_rq_template, TP_ARGS(cpu, avg), TP_STRUCT__entry( - __field( int, cpu ) + __field( unsigned long long, update_time ) __field( unsigned long, load ) #if HAS_KERNEL_FEATURE(SCHED_AVG_RBL) __field( unsigned long, RBL_LOAD_ENTRY ) #endif __field( unsigned long, util ) - __field( unsigned long long, update_time ) + __field( int, cpu ) ), TP_fast_assign( @@ -134,21 +134,21 @@ DEFINE_EVENT(lisa__sched_pelt_rq_template, lisa__sched_pelt_irq, #if HAS_KERNEL_FEATURE(SE_PELT) TRACE_EVENT(lisa__sched_pelt_se, - TP_PROTO(int cpu, char *path, char *comm, int pid, const struct sched_avg *avg), + TP_PROTO(int cpu, const char *path, const char *comm, int pid, const struct sched_avg *avg), TP_ARGS(cpu, path, comm, pid, avg), TP_STRUCT__entry( - __field( int, cpu ) - __array( char, path, PATH_SIZE ) - __array( char, comm, TASK_COMM_LEN ) - __field( int, pid ) + __field( unsigned long long, update_time ) __field( unsigned long, load ) #if HAS_KERNEL_FEATURE(SCHED_AVG_RBL) __field( unsigned long, RBL_LOAD_ENTRY ) #endif __field( unsigned long, util ) - __field( unsigned long long, update_time ) + __field( int, cpu ) + __field( int, pid ) + 
__array( char, path, PATH_SIZE ) + __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( @@ -182,7 +182,7 @@ TRACE_EVENT(lisa__sched_pelt_se, #if HAS_KERNEL_FEATURE(SCHED_OVERUTILIZED) TRACE_EVENT(lisa__sched_overutilized, - TP_PROTO(int overutilized, char *span), + TP_PROTO(int overutilized, const char *span), TP_ARGS(overutilized, span), @@ -204,42 +204,42 @@ TRACE_EVENT(lisa__sched_overutilized, #if HAS_KERNEL_FEATURE(RQ_NR_RUNNING) TRACE_EVENT(lisa__sched_update_nr_running, - TP_PROTO(int cpu, int change, unsigned int nr_running), + TP_PROTO(int cpu, int change, unsigned int nr_running), - TP_ARGS(cpu, change, nr_running), + TP_ARGS(cpu, change, nr_running), - TP_STRUCT__entry( - __field( int, cpu ) - __field( int, change ) - __field(unsigned int, nr_running ) - ), + TP_STRUCT__entry( + __field( int, cpu ) + __field( int, change ) + __field(unsigned int, nr_running ) + ), - TP_fast_assign( - __entry->cpu = cpu; - __entry->change = change; - __entry->nr_running = nr_running; - ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->change = change; + __entry->nr_running = nr_running; + ), - TP_printk("cpu=%d change=%d nr_running=%d", __entry->cpu, __entry->change, __entry->nr_running) - ); + TP_printk("cpu=%d change=%d nr_running=%d", __entry->cpu, __entry->change, __entry->nr_running) +); #endif #if HAS_KERNEL_FEATURE(SE_UTIL_EST) TRACE_EVENT(lisa__sched_util_est_se, - TP_PROTO(int cpu, char *path, char *comm, int pid, + TP_PROTO(int cpu, const char *path, const char *comm, int pid, const struct sched_avg *avg), TP_ARGS(cpu, path, comm, pid, avg), TP_STRUCT__entry( + __field( unsigned long, util ) + __field( unsigned int, enqueued ) + __field( unsigned int, ewma ) __field( int, cpu ) + __field( int, pid ) __array( char, path, PATH_SIZE ) __array( char, comm, TASK_COMM_LEN ) - __field( int, pid ) - __field( unsigned int, enqueued ) - __field( unsigned int, ewma ) - __field( unsigned long, util ) ), TP_fast_assign( @@ -266,11 +266,11 @@ 
TRACE_EVENT(lisa__sched_util_est_cfs, TP_ARGS(cpu, path, avg), TP_STRUCT__entry( - __field( int, cpu ) - __array( char, path, PATH_SIZE ) + __field( unsigned long, util ) __field( unsigned int, enqueued ) __field( unsigned int, ewma ) - __field( unsigned long, util ) + __field( int, cpu ) + __array( char, path, PATH_SIZE ) ), TP_fast_assign( @@ -290,20 +290,20 @@ TRACE_EVENT(lisa__sched_util_est_cfs, #if HAS_KERNEL_FEATURE(SE_UCLAMP) TRACE_EVENT_CONDITION(lisa__uclamp_util_se, - TP_PROTO(bool is_task, struct task_struct *p, struct rq *rq), + TP_PROTO(bool is_task, const struct task_struct *p, const struct rq *rq), TP_ARGS(is_task, p, rq), TP_CONDITION(is_task), TP_STRUCT__entry( - __field( pid_t, pid ) - __array( char, comm, TASK_COMM_LEN ) - __field( int, cpu ) __field(unsigned long, util_avg ) __field(unsigned long, uclamp_avg ) __field(unsigned long, uclamp_min ) __field(unsigned long, uclamp_max ) + __field( int, cpu ) + __field( pid_t, pid ) + __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( @@ -313,20 +313,20 @@ TRACE_EVENT_CONDITION(lisa__uclamp_util_se, __entry->util_avg = p->se.avg.util_avg; __entry->uclamp_avg = uclamp_rq_util_with(rq, p->se.avg.util_avg); -# if HAS_KERNEL_FEATURE(CFS_UCLAMP) +# if HAS_KERNEL_FEATURE(RQ_UCLAMP) __entry->uclamp_min = rq->uclamp[UCLAMP_MIN].value; __entry->uclamp_max = rq->uclamp[UCLAMP_MAX].value; # endif ), TP_printk("pid=%d comm=%s cpu=%d util_avg=%lu uclamp_avg=%lu" -# if HAS_KERNEL_FEATURE(CFS_UCLAMP) +# if HAS_KERNEL_FEATURE(RQ_UCLAMP) " uclamp_min=%lu uclamp_max=%lu" # endif , __entry->pid, __entry->comm, __entry->cpu, __entry->util_avg, __entry->uclamp_avg -# if HAS_KERNEL_FEATURE(CFS_UCLAMP) +# if HAS_KERNEL_FEATURE(RQ_UCLAMP) ,__entry->uclamp_min, __entry->uclamp_max # endif ) @@ -336,21 +336,21 @@ TRACE_EVENT_CONDITION(lisa__uclamp_util_se, #define trace_lisa__uclamp_util_se_enabled() (false) #endif -#if HAS_KERNEL_FEATURE(CFS_UCLAMP) +#if HAS_KERNEL_FEATURE(RQ_UCLAMP) 
TRACE_EVENT_CONDITION(lisa__uclamp_util_cfs, - TP_PROTO(bool is_root, struct rq *rq, struct cfs_rq *cfs_rq), + TP_PROTO(bool is_root, const struct rq *rq, const struct cfs_rq *cfs_rq), TP_ARGS(is_root, rq, cfs_rq), TP_CONDITION(is_root), TP_STRUCT__entry( - __field( int, cpu ) __field(unsigned long, util_avg ) __field(unsigned long, uclamp_avg ) __field(unsigned long, uclamp_min ) __field(unsigned long, uclamp_max ) + __field( int, cpu ) ), TP_fast_assign( @@ -379,12 +379,12 @@ TRACE_EVENT(lisa__sched_cpu_capacity, TP_ARGS(rq), TP_STRUCT__entry( - __field( int, cpu ) __field( unsigned long, capacity ) #if HAS_KERNEL_FEATURE(FREQ_INVARIANCE) __field( unsigned long, capacity_orig ) __field( unsigned long, capacity_curr ) #endif + __field( int, cpu ) ), TP_fast_assign( @@ -417,7 +417,7 @@ TRACE_EVENT(lisa__sched_cpu_capacity, #define PIXEL6_EMETER_CHAN_NAME_MAX_SIZE 64 TRACE_EVENT(lisa__pixel6_emeter, - TP_PROTO(unsigned long ts, unsigned int device, unsigned int chan, char *chan_name, unsigned long value), + TP_PROTO(unsigned long ts, unsigned int device, unsigned int chan, const char *chan_name, unsigned long value), TP_ARGS(ts, device, chan, chan_name, value), TP_STRUCT__entry( diff --git a/lisa/_assets/kmodules/lisa/introspection.json b/lisa/_assets/kmodules/lisa/introspection.json index fdef05c67513c824960da45a8e9027c85650c61a..1955511b5dd885c308883625bf55a5f3e7c45a78 100644 --- a/lisa/_assets/kmodules/lisa/introspection.json +++ b/lisa/_assets/kmodules/lisa/introspection.json @@ -12,7 +12,7 @@ "CFS_PELT": "HAS_MEMBER(struct, cfs_rq, avg) && HAS_TYPE(struct, sched_avg)", "CFS_UTIL_EST": "HAS_KERNEL_FEATURE(CFS_PELT) && HAS_MEMBER(struct, sched_avg, util_est)", - "CFS_UCLAMP": "IS_ENABLED(CONFIG_UCLAMP_TASK) && HAS_MEMBER(struct, rq, uclamp)", + "RQ_UCLAMP": "IS_ENABLED(CONFIG_UCLAMP_TASK) && HAS_MEMBER(struct, rq, uclamp)", "SE_PELT": "HAS_TYPE(struct, sched_avg) && HAS_MEMBER(struct, sched_entity, avg)", "SE_UCLAMP": "HAS_KERNEL_FEATURE(SE_PELT) && 
HAS_MEMBER(struct, uclamp_se, value)", diff --git a/lisa/_assets/kmodules/lisa/sched_helpers.h b/lisa/_assets/kmodules/lisa/sched_helpers.h index ff0ddc8107e96ea6480e7a48f5795f00dbc3a1b2..2e4cb771e9d4248cf422074077c9a28278432e6f 100644 --- a/lisa/_assets/kmodules/lisa/sched_helpers.h +++ b/lisa/_assets/kmodules/lisa/sched_helpers.h @@ -13,7 +13,7 @@ #if HAS_TYPE(struct, cfs_rq) # if defined(CONFIG_FAIR_GROUP_SCHED) && HAS_MEMBER(struct, cfs_rq, rq) -static inline struct rq *rq_of(struct cfs_rq *cfs_rq) +static inline const struct rq *rq_of(const struct cfs_rq *cfs_rq) { return cfs_rq->rq; } @@ -28,7 +28,7 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq) #endif -static inline bool entity_is_task(struct sched_entity *se) +static inline bool entity_is_task(const struct sched_entity *se) { return #if HAS_MEMBER(struct, sched_entity, my_q) @@ -41,7 +41,7 @@ static inline bool entity_is_task(struct sched_entity *se) #if HAS_TYPE(struct, rq) -static inline int cpu_of(struct rq *rq) +static inline int cpu_of(const struct rq *rq) { # if defined(CONFIG_SMP) && HAS_MEMBER(struct, rq, cpu) return rq->cpu; @@ -55,7 +55,7 @@ static inline int cpu_of(struct rq *rq) #if HAS_TYPE(struct, task_group) -static inline bool task_group_is_autogroup(struct task_group *tg) +static inline bool task_group_is_autogroup(const struct task_group *tg) { # if HAS_KERNEL_FEATURE(SCHED_AUTOGROUP) return !!tg->autogroup; @@ -66,7 +66,7 @@ static inline bool task_group_is_autogroup(struct task_group *tg) #endif #if HAS_TYPE(struct, task_group) -static int autogroup_path(struct task_group *tg, char *buf, int buflen) +static int autogroup_path(const struct task_group *tg, char *buf, int buflen) { # if HAS_KERNEL_FEATURE(SCHED_AUTOGROUP) && HAS_MEMBER(struct, autogroup, id) if (!task_group_is_autogroup(tg)) @@ -82,9 +82,9 @@ static int autogroup_path(struct task_group *tg, char *buf, int buflen) #if HAS_TYPE(struct, rq) /* A cut down version of the original. 
@p MUST be NULL */ -static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util) +static inline unsigned long uclamp_rq_util_with(const struct rq *rq, unsigned long util) { -# if HAS_KERNEL_FEATURE(CFS_UCLAMP) +# if HAS_KERNEL_FEATURE(RQ_UCLAMP) unsigned long min_util; unsigned long max_util; @@ -103,7 +103,7 @@ static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long uti #if HAS_TYPE(struct, cfs_rq) -static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) +static inline void cfs_rq_tg_path(const struct cfs_rq *cfs_rq, char *path, int len) { if (!path) return; @@ -120,7 +120,7 @@ static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) #endif #if HAS_TYPE(struct, sched_entity) -static inline struct cfs_rq *get_group_cfs_rq(struct sched_entity *se) +static inline const struct cfs_rq *get_group_cfs_rq(const struct sched_entity *se) { # if defined(CONFIG_FAIR_GROUP_SCHED) && HAS_MEMBER(struct, sched_entity, my_q) return se->my_q; @@ -129,7 +129,7 @@ static inline struct cfs_rq *get_group_cfs_rq(struct sched_entity *se) # endif } -static inline struct cfs_rq *get_se_cfs_rq(struct sched_entity *se) +static inline const struct cfs_rq *get_se_cfs_rq(const struct sched_entity *se) { # if defined(CONFIG_FAIR_GROUP_SCHED) && HAS_MEMBER(struct, sched_entity, cfs_rq) return se->cfs_rq; @@ -141,7 +141,7 @@ static inline struct cfs_rq *get_se_cfs_rq(struct sched_entity *se) #if HAS_TYPE(struct, cfs_rq) -static inline const struct sched_avg *cfs_rq_avg(struct cfs_rq *cfs_rq) +static inline const struct sched_avg *cfs_rq_avg(const struct cfs_rq *cfs_rq) { # if HAS_KERNEL_FEATURE(CFS_PELT) return cfs_rq ? 
(struct sched_avg *)&cfs_rq->avg : NULL; @@ -150,7 +150,7 @@ static inline const struct sched_avg *cfs_rq_avg(struct cfs_rq *cfs_rq) # endif } -static inline char *cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) +static inline char *cfs_rq_path(const struct cfs_rq *cfs_rq, char *str, int len) { if (!cfs_rq) { if (str) @@ -163,7 +163,7 @@ static inline char *cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) return str; } -static inline int cfs_rq_cpu(struct cfs_rq *cfs_rq) +static inline int cfs_rq_cpu(const struct cfs_rq *cfs_rq) { return cpu_of(rq_of(cfs_rq)); } @@ -180,7 +180,7 @@ static inline const struct sched_avg *rq_avg_rt(struct rq *rq) # endif } -static inline const struct sched_avg *rq_avg_dl(struct rq *rq) +static inline const struct sched_avg *rq_avg_dl(const struct rq *rq) { # if HAS_KERNEL_FEATURE(DL_PELT) return rq ? (struct sched_avg *)&rq->avg_dl : NULL; @@ -189,7 +189,7 @@ static inline const struct sched_avg *rq_avg_dl(struct rq *rq) # endif } -static inline const struct sched_avg *rq_avg_irq(struct rq *rq) +static inline const struct sched_avg *rq_avg_irq(const struct rq *rq) { # if HAS_KERNEL_FEATURE(IRQ_PELT) return rq ? 
(struct sched_avg *)&rq->avg_irq : NULL; @@ -198,12 +198,12 @@ static inline const struct sched_avg *rq_avg_irq(struct rq *rq) # endif } -static inline int rq_cpu(struct rq *rq) +static inline int rq_cpu(const struct rq *rq) { return cpu_of(rq); } -static inline int rq_cpu_capacity(struct rq *rq) +static inline int rq_cpu_capacity(const struct rq *rq) { return # if HAS_KERNEL_FEATURE(RQ_CAPACITY) @@ -214,7 +214,7 @@ static inline int rq_cpu_capacity(struct rq *rq) ; } -static inline int rq_cpu_orig_capacity(struct rq *rq) +static inline int rq_cpu_orig_capacity(const struct rq *rq) { return # if HAS_KERNEL_FEATURE(FREQ_INVARIANCE) @@ -229,7 +229,7 @@ static inline int rq_cpu_orig_capacity(struct rq *rq) DECLARE_PER_CPU(unsigned long, arch_freq_scale); # endif -static inline int rq_cpu_current_capacity(struct rq *rq) +static inline int rq_cpu_current_capacity(const struct rq *rq) { return # if HAS_KERNEL_FEATURE(FREQ_INVARIANCE) @@ -245,7 +245,7 @@ static inline int rq_cpu_current_capacity(struct rq *rq) } # if HAS_KERNEL_FEATURE(RQ_NR_RUNNING) -static inline int rq_nr_running(struct rq *rq) +static inline int rq_nr_running(const struct rq *rq) { return rq->nr_running; } @@ -254,7 +254,7 @@ static inline int rq_nr_running(struct rq *rq) #endif #if HAS_TYPE(struct, root_domain) -static inline const struct cpumask *rd_span(struct root_domain *rd) +static inline const struct cpumask *rd_span(const struct root_domain *rd) { # if defined(CONFIG_SMP) && HAS_MEMBER(struct, root_domain, span) return rd ? 
(struct cpumask *)rd->span : NULL; diff --git a/lisa/_assets/kmodules/lisa/tp.c b/lisa/_assets/kmodules/lisa/tp.c index 4e3cad9ca3b51edb75a5a8f90b088c8b5d9f8399..9d369eee076054341330cc924cc81ed6aaa8068c 100644 --- a/lisa/_assets/kmodules/lisa/tp.c +++ b/lisa/_assets/kmodules/lisa/tp.c @@ -30,11 +30,11 @@ static inline void _trace_cfs(struct cfs_rq *cfs_rq, #if HAS_KERNEL_FEATURE(SE_PELT) static inline void _trace_se(struct sched_entity *se, - void (*trace_event)(int, char*, char*, int, + void (*trace_event)(int, const char*, const char*, int, const struct sched_avg*)) { - void *gcfs_rq = get_group_cfs_rq(se); - void *cfs_rq = get_se_cfs_rq(se); + const struct cfs_rq *gcfs_rq = get_group_cfs_rq(se); + const struct cfs_rq *cfs_rq = get_se_cfs_rq(se); char path[PATH_SIZE]; cfs_rq_path(gcfs_rq, path, PATH_SIZE); @@ -53,15 +53,15 @@ static void sched_pelt_cfs_probe(void *feature, struct cfs_rq *cfs_rq) { _trace_cfs(cfs_rq, trace_lisa__sched_pelt_cfs); } -DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_cfs, pelt_cfs_tp, sched_pelt_cfs_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_cfs, TP_PROBES(TP_PROBE("pelt_cfs_tp", sched_pelt_cfs_probe))); #endif -#if HAS_KERNEL_FEATURE(CFS_UCLAMP) +#if HAS_KERNEL_FEATURE(RQ_UCLAMP) static void uclamp_util_cfs_probe(void *feature, struct cfs_rq *cfs_rq) { bool __maybe_unused is_root_rq = ((struct cfs_rq *)&rq_of(cfs_rq)->cfs == cfs_rq); trace_lisa__uclamp_util_cfs(is_root_rq, rq_of(cfs_rq), cfs_rq); } -DEFINE_TP_EVENT_FEATURE(lisa__uclamp_util_cfs, pelt_cfs_tp, uclamp_util_cfs_probe); +DEFINE_TP_EVENT_FEATURE(lisa__uclamp_util_cfs, TP_PROBES(TP_PROBE("pelt_cfs_tp", uclamp_util_cfs_probe))); #endif #if HAS_KERNEL_FEATURE(RT_PELT) @@ -77,7 +77,7 @@ static void sched_pelt_rt_probe(void *feature, struct rq *rq) trace_lisa__sched_pelt_rt(cpu, avg); } } -DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_rt, pelt_rt_tp, sched_pelt_rt_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_rt, TP_PROBES(TP_PROBE("pelt_rt_tp", sched_pelt_rt_probe))); #endif #if 
HAS_KERNEL_FEATURE(DL_PELT) @@ -93,7 +93,7 @@ static void sched_pelt_dl_probe(void *feature, struct rq *rq) trace_lisa__sched_pelt_dl(cpu, avg); } } -DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_dl, pelt_dl_tp, sched_pelt_dl_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_dl, TP_PROBES(TP_PROBE("pelt_dl_tp", sched_pelt_dl_probe))); #endif #if HAS_KERNEL_FEATURE(IRQ_PELT) @@ -109,7 +109,7 @@ static void sched_pelt_irq_probe(void *feature, struct rq *rq) trace_lisa__sched_pelt_irq(cpu, avg); } } -DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_irq, pelt_irq_tp, sched_pelt_irq_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_irq, TP_PROBES(TP_PROBE("pelt_irq_tp", sched_pelt_irq_probe))); #endif #if HAS_KERNEL_FEATURE(SE_PELT) @@ -117,20 +117,20 @@ static void sched_pelt_se_probe(void *feature, struct sched_entity *se) { _trace_se(se, trace_lisa__sched_pelt_se); } -DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_se, pelt_se_tp, sched_pelt_se_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_pelt_se, TP_PROBES(TP_PROBE("pelt_se_tp", sched_pelt_se_probe))); #endif #if HAS_KERNEL_FEATURE(SE_UCLAMP) static void uclamp_util_se_probe(void *feature, struct sched_entity *se) { - struct cfs_rq __maybe_unused *cfs_rq = get_se_cfs_rq(se); + const struct cfs_rq __maybe_unused *cfs_rq = get_se_cfs_rq(se); trace_lisa__uclamp_util_se(entity_is_task(se), container_of(se, struct task_struct, se), rq_of(cfs_rq)); } -DEFINE_TP_EVENT_FEATURE(lisa__uclamp_util_se, pelt_se_tp, uclamp_util_se_probe); +DEFINE_TP_EVENT_FEATURE(lisa__uclamp_util_se, TP_PROBES(TP_PROBE("pelt_se_tp", uclamp_util_se_probe))); #endif #if HAS_KERNEL_FEATURE(SCHED_OVERUTILIZED) @@ -144,7 +144,7 @@ static void sched_overutilized_probe(void *feature, struct root_domain *rd, bool trace_lisa__sched_overutilized(overutilized, span); } } -DEFINE_TP_EVENT_FEATURE(lisa__sched_overutilized, sched_overutilized_tp, sched_overutilized_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_overutilized, TP_PROBES(TP_PROBE("sched_overutilized_tp", 
sched_overutilized_probe))); #endif #if HAS_KERNEL_FEATURE(RQ_NR_RUNNING) @@ -157,7 +157,7 @@ static void sched_update_nr_running_probe(void *feature, struct rq *rq, int chan trace_lisa__sched_update_nr_running(cpu, change, nr_running); } } -DEFINE_TP_EVENT_FEATURE(lisa__sched_update_nr_running, sched_update_nr_running_tp, sched_update_nr_running_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_update_nr_running, TP_PROBES(TP_PROBE("sched_update_nr_running_tp", sched_update_nr_running_probe))); #endif #if HAS_KERNEL_FEATURE(CFS_UTIL_EST) @@ -165,7 +165,7 @@ static void sched_util_est_cfs_probe(void *feature, struct cfs_rq *cfs_rq) { _trace_cfs(cfs_rq, trace_lisa__sched_util_est_cfs); } -DEFINE_TP_EVENT_FEATURE(lisa__sched_util_est_cfs, sched_util_est_cfs_tp, sched_util_est_cfs_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_util_est_cfs, TP_PROBES(TP_PROBE("sched_util_est_cfs_tp", sched_util_est_cfs_probe))); #endif #if HAS_KERNEL_FEATURE(SE_UTIL_EST) @@ -173,7 +173,7 @@ static void sched_util_est_se_probe(void *feature, struct sched_entity *se) { _trace_se(se, trace_lisa__sched_util_est_se); } -DEFINE_TP_EVENT_FEATURE(lisa__sched_util_est_se, sched_util_est_se_tp, sched_util_est_se_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_util_est_se, TP_PROBES(TP_PROBE("sched_util_est_se_tp", sched_util_est_se_probe))); #endif #if HAS_KERNEL_FEATURE(RQ_CAPACITY) @@ -181,7 +181,7 @@ static void sched_cpu_capacity_probe(void *feature, struct rq *rq) { trace_lisa__sched_cpu_capacity(rq); } -DEFINE_TP_EVENT_FEATURE(lisa__sched_cpu_capacity, sched_cpu_capacity_tp, sched_cpu_capacity_probe); +DEFINE_TP_EVENT_FEATURE(lisa__sched_cpu_capacity, TP_PROBES(TP_PROBE("sched_cpu_capacity_tp", sched_cpu_capacity_probe))); #endif diff --git a/lisa/_assets/kmodules/lisa/tp.h b/lisa/_assets/kmodules/lisa/tp.h index 27e54be876e38803bdda9f269582920c5fa0ff99..ab487fea7a9350a7d9f67ce17ccd821d067ed61d 100644 --- a/lisa/_assets/kmodules/lisa/tp.h +++ b/lisa/_assets/kmodules/lisa/tp.h @@ -22,8 +22,16 @@ 
__attribute__((unused)) static struct tracepoint *__find_tracepoint(const char * return res.found; } -#define DEFINE_TP_ENABLE_DISABLE(feature_name, tp_name, probe, enable_name, enable_f, disable_name, disable_f) \ - static bool __feature_tp_registered_##feature_name = false; \ +struct __tp_probe { + void *probe; + const char *tp_name; +}; + +#define TP_PROBE(_tp_name, _probe) (&(const struct __tp_probe){.tp_name=_tp_name, .probe=_probe}) +#define TP_PROBES(...) ((const struct __tp_probe **)(const struct __tp_probe *[]){__VA_ARGS__, NULL}) + +#define DEFINE_TP_ENABLE_DISABLE(feature_name, tp_probes, enable_name, enable_f, disable_name, disable_f) \ + static u64 CONCATENATE(__feature_tp_registered_, feature_name) = 0; \ static int enable_name(struct feature* feature) { \ int ret = 0; \ int __ret; \ @@ -34,25 +42,29 @@ __attribute__((unused)) static struct tracepoint *__find_tracepoint(const char * if (ret) { \ pr_err(#feature_name ": could not enable tracepoint support: %i\n", __ret); \ } else { \ - tp = __find_tracepoint(#tp_name); \ - if (tp) { \ - if (_enable_f) { \ - __ret = _enable_f(feature); \ - ret |= __ret; \ - if (__ret) \ - pr_err(#feature_name ": init function " #enable_f "() failed with error: %i\n", __ret); \ - } \ - if (!ret) { \ - __ret = tracepoint_probe_register(tp, (void *)probe, feature); \ - ret |= __ret; \ - if (__ret) \ - pr_err(#feature_name ": could not attach " #probe "() to tracepoint " #tp_name "\n"); \ - } \ - __feature_tp_registered_##feature_name = !ret; \ - return ret; \ - } else { \ - pr_err(#feature_name ": could not attach " #probe "() to undefined tracepoint " #tp_name "\n"); \ - ret |= 1; \ + if (_enable_f) { \ + __ret = _enable_f(feature); \ + ret |= __ret; \ + if (__ret) \ + pr_err(#feature_name ": init function " #enable_f "() failed with error: %i\n", __ret); \ + } \ + if (!ret) { \ + const struct __tp_probe **__tp_probes = tp_probes; \ + for (size_t i=0; __tp_probes[i]; i++) { \ + BUG_ON(i > 
(sizeof(CONCATENATE(__feature_tp_registered_, feature_name)) * 8 - 1));\ + const struct __tp_probe *probe = __tp_probes[i];\ + tp = __find_tracepoint(probe->tp_name); \ + if (tp) { \ + __ret = tracepoint_probe_register(tp, probe->probe, feature); \ + ret |= __ret; \ + if (__ret) \ + pr_err(#feature_name ": could not attach probe to tracepoint %s\n", probe->tp_name); \ + CONCATENATE(__feature_tp_registered_, feature_name) |= ((u64)!__ret) << i; \ + } else { \ + pr_err(#feature_name ": could not attach probe to undefined tracepoint %s\n", probe->tp_name); \ + ret |= 1; \ + } \ + } \ + } \ } \ } \ return ret; \ @@ -61,17 +73,22 @@ __attribute__((unused)) static struct tracepoint *__find_tracepoint(const char * int ret = 0; \ int __ret; \ int (*_disable_f)(struct feature*) = disable_f; \ - struct tracepoint *tp = __find_tracepoint(#tp_name); \ - if (tp) { \ - if(__feature_tp_registered_##feature_name) { \ - __ret = tracepoint_probe_unregister(tp, (void *)probe, feature); \ - ret |= __ret; \ - if (__ret) \ - pr_err(#feature_name ": failed to unregister function " #probe "() on tracepoint " #tp_name "\n"); \ + const struct __tp_probe **__tp_probes = tp_probes; \ + for (size_t i=0; __tp_probes[i]; i++) { \ + BUG_ON(i > (sizeof(CONCATENATE(__feature_tp_registered_, feature_name)) * 8 - 1));\ + const struct __tp_probe *probe = __tp_probes[i]; \ + struct tracepoint *tp = __find_tracepoint(probe->tp_name); \ + if (tp) { \ + if(CONCATENATE(__feature_tp_registered_, feature_name) & (1ull << i)) { \ + __ret = tracepoint_probe_unregister(tp, probe->probe, feature); \ + ret |= __ret; \ + if (__ret) \ + pr_err(#feature_name ": failed to unregister function probe on tracepoint %s\n", probe->tp_name); \ + } \ } \ - if (_disable_f) \ - ret |= _disable_f(feature); \ } \ + if (_disable_f) \ + ret |= _disable_f(feature); \ ret |= DISABLE_FEATURE(__tp); \ return ret; \ } \ @@ -79,8 +96,7 @@ __attribute__((unused)) static struct tracepoint *__find_tracepoint(const char * /** * 
DEFINE_EXTENDED_TP_FEATURE() - Define a feature linked to a tracepoint. * @feature_name: Name of the feature. - * @tp_name: Name of the tracepoint to attach to. - * @probe: Probe function passed to the relevant tracepoint registering function register_trace_*(). + * @probes: List of tracepoint probes built using TP_PROBES(TP_PROBE("my_tp", my_probe), ...) * @enable_f: Additional enable function for the feature. It must take a struct * feature * and return a non-zero int in case of failure. * @disable_f: Additional disable function for the feature. Same signature as enable_f(). @@ -89,26 +105,40 @@ __attribute__((unused)) static struct tracepoint *__find_tracepoint(const char * * user-defined enable/disable functions. If the tracepoint is not found, the * user functions will not be called. */ -#define DEFINE_EXTENDED_TP_FEATURE(feature_name, tp_name, probe, enable_f, disable_f) \ - DEFINE_TP_ENABLE_DISABLE(feature_name, tp_name, probe, __tp_feature_enable_##feature_name, enable_f, __tp_feature_disable_##feature_name, disable_f); \ - DEFINE_FEATURE(feature_name, __tp_feature_enable_##feature_name, __tp_feature_disable_##feature_name); +#define DEFINE_EXTENDED_TP_FEATURE(feature_name, probes, enable_f, disable_f) \ + DEFINE_TP_ENABLE_DISABLE(feature_name, probes, CONCATENATE(__tp_feature_enable_, feature_name), enable_f, CONCATENATE(__tp_feature_disable_, feature_name), disable_f); \ + DEFINE_FEATURE(feature_name, CONCATENATE(__tp_feature_enable_, feature_name), CONCATENATE(__tp_feature_disable_, feature_name)); /** * DEFINE_TP_FEATURE() - Same as DEFINE_EXTENDED_TP_FEATURE() without custom * enable/disable functions. 
*/ -#define DEFINE_TP_FEATURE(feature_name, tp_name, probe) DEFINE_EXTENDED_TP_FEATURE(feature_name, tp_name, probe, NULL, NULL) +#define DEFINE_TP_FEATURE(feature_name, probes) DEFINE_EXTENDED_TP_FEATURE(feature_name, probes, NULL, NULL) -#define __EVENT_FEATURE(event_name) event__##event_name +#define __EVENT_FEATURE(event_name) CONCATENATE(event__, event_name) /** * DEFINE_TP_EVENT_FEATURE() - Same as DEFINE_TP_FEATURE() with automatic * "event__" prefixing of the feature name. */ -#define DEFINE_TP_EVENT_FEATURE(event_name, tp_name, probe) DEFINE_TP_FEATURE(__EVENT_FEATURE(event_name), tp_name, probe) +#define DEFINE_TP_EVENT_FEATURE(event_name, probes) DEFINE_TP_FEATURE(__EVENT_FEATURE(event_name), probes) + /** * DEFINE_EXTENDED_TP_EVENT_FEATURE() - Same as DEFINE_EXTENDED_TP_FEATURE() * with automatic "event__" prefixing of the feature name. */ -#define DEFINE_EXTENDED_TP_EVENT_FEATURE(event_name, tp_name, probe, enable_f, disable_f) DEFINE_EXTENDED_TP_FEATURE(__EVENT_FEATURE(event_name), tp_name, probe, enable_f, disable_f) +#define DEFINE_EXTENDED_TP_EVENT_FEATURE(event_name, probes, enable_f, disable_f) DEFINE_EXTENDED_TP_FEATURE(__EVENT_FEATURE(event_name), probes, enable_f, disable_f) + +#define __DEPRECATED_EVENT_ENABLE(event_name) CONCATENATE(__enable_deprecated_feature_, __EVENT_FEATURE(event_name)) +/** + * DEFINE_TP_DEPRECATED_EVENT_FEATURE() - Same as DEFINE_TP_EVENT_FEATURE() + * with extra deprecation warnings upon init. + */ +#define DEFINE_TP_DEPRECATED_EVENT_FEATURE(msg, event_name, probes) \ +static int __DEPRECATED_EVENT_ENABLE(event_name)(struct feature *feature) \ +{ \ + pr_warn("The feature %s is deprecated: " msg, feature->name); \ + return 0; \ +} \ +DEFINE_EXTENDED_TP_EVENT_FEATURE(event_name, probes, __DEPRECATED_EVENT_ENABLE(event_name), NULL) #endif